Spaces: Running on Zero

Commit: first

This view is limited to 50 files because it contains too many changes. See the raw diff for the full changeset.
- .gitattributes +41 -35
- .gitignore +7 -0
- app.py +388 -7
- requirements.txt +36 -0
- test.ipynb +78 -0
- tools/__init__.py +0 -0
- tools/asr/config.py +33 -0
- tools/asr/fasterwhisper_asr.py +114 -0
- tools/asr/funasr_asr.py +79 -0
- tools/asr/models/.gitignore +2 -0
- tools/cmd-denoise.py +33 -0
- tools/denoise-model/.gitignore +2 -0
- tools/i18n/i18n.py +30 -0
- tools/i18n/locale/en_US.json +172 -0
- tools/i18n/locale/es_ES.json +172 -0
- tools/i18n/locale/fr_FR.json +172 -0
- tools/i18n/locale/it_IT.json +172 -0
- tools/i18n/locale/ja_JP.json +172 -0
- tools/i18n/locale/ko_KR.json +172 -0
- tools/i18n/locale/pt_BR.json +172 -0
- tools/i18n/locale/ru_RU.json +172 -0
- tools/i18n/locale/tr_TR.json +172 -0
- tools/i18n/locale/zh_CN.json +172 -0
- tools/i18n/locale/zh_HK.json +172 -0
- tools/i18n/locale/zh_SG.json +172 -0
- tools/i18n/locale/zh_TW.json +172 -0
- tools/i18n/scan_i18n.py +119 -0
- tools/my_utils.py +33 -0
- tools/slice_audio.py +48 -0
- tools/slicer2.py +261 -0
- tools/subfix_webui.py +498 -0
- tools/uvr5/bs_roformer/__init__.py +0 -0
- tools/uvr5/bs_roformer/attend.py +120 -0
- tools/uvr5/bs_roformer/bs_roformer.py +583 -0
- tools/uvr5/bsroformer.py +216 -0
- tools/uvr5/lib/lib_v5/dataset.py +183 -0
- tools/uvr5/lib/lib_v5/layers.py +118 -0
- tools/uvr5/lib/lib_v5/layers_123812KB.py +118 -0
- tools/uvr5/lib/lib_v5/layers_123821KB.py +118 -0
- tools/uvr5/lib/lib_v5/layers_33966KB.py +126 -0
- tools/uvr5/lib/lib_v5/layers_537227KB.py +126 -0
- tools/uvr5/lib/lib_v5/layers_537238KB.py +126 -0
- tools/uvr5/lib/lib_v5/layers_new.py +125 -0
- tools/uvr5/lib/lib_v5/model_param_init.py +69 -0
- tools/uvr5/lib/lib_v5/modelparams/1band_sr16000_hl512.json +19 -0
- tools/uvr5/lib/lib_v5/modelparams/1band_sr32000_hl512.json +19 -0
- tools/uvr5/lib/lib_v5/modelparams/1band_sr33075_hl384.json +19 -0
- tools/uvr5/lib/lib_v5/modelparams/1band_sr44100_hl1024.json +19 -0
- tools/uvr5/lib/lib_v5/modelparams/1band_sr44100_hl256.json +19 -0
- tools/uvr5/lib/lib_v5/modelparams/1band_sr44100_hl512.json +19 -0
.gitattributes
CHANGED
@@ -1,35 +1,41 @@
 *.7z filter=lfs diff=lfs merge=lfs -text
 *.arrow filter=lfs diff=lfs merge=lfs -text
 *.bin filter=lfs diff=lfs merge=lfs -text
 *.bz2 filter=lfs diff=lfs merge=lfs -text
 *.ckpt filter=lfs diff=lfs merge=lfs -text
 *.ftz filter=lfs diff=lfs merge=lfs -text
 *.gz filter=lfs diff=lfs merge=lfs -text
 *.h5 filter=lfs diff=lfs merge=lfs -text
 *.joblib filter=lfs diff=lfs merge=lfs -text
 *.lfs.* filter=lfs diff=lfs merge=lfs -text
 *.mlmodel filter=lfs diff=lfs merge=lfs -text
 *.model filter=lfs diff=lfs merge=lfs -text
 *.msgpack filter=lfs diff=lfs merge=lfs -text
 *.npy filter=lfs diff=lfs merge=lfs -text
 *.npz filter=lfs diff=lfs merge=lfs -text
 *.onnx filter=lfs diff=lfs merge=lfs -text
 *.ot filter=lfs diff=lfs merge=lfs -text
 *.parquet filter=lfs diff=lfs merge=lfs -text
 *.pb filter=lfs diff=lfs merge=lfs -text
 *.pickle filter=lfs diff=lfs merge=lfs -text
 *.pkl filter=lfs diff=lfs merge=lfs -text
 *.pt filter=lfs diff=lfs merge=lfs -text
 *.pth filter=lfs diff=lfs merge=lfs -text
 *.rar filter=lfs diff=lfs merge=lfs -text
 *.safetensors filter=lfs diff=lfs merge=lfs -text
 saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.tar.* filter=lfs diff=lfs merge=lfs -text
 *.tar filter=lfs diff=lfs merge=lfs -text
 *.tflite filter=lfs diff=lfs merge=lfs -text
 *.tgz filter=lfs diff=lfs merge=lfs -text
 *.wasm filter=lfs diff=lfs merge=lfs -text
 *.xz filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+userdic.csv filter=lfs diff=lfs merge=lfs -text
+*.md5 filter=lfs diff=lfs merge=lfs -text
+*.wav filter=lfs diff=lfs merge=lfs -text
+*.mp3 filter=lfs diff=lfs merge=lfs -text
+*.csv filter=lfs diff=lfs merge=lfs -text
+*.dic filter=lfs diff=lfs merge=lfs -text
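The six new rules extend Git LFS tracking to the audio and dictionary assets this Space ships (reference WAV/MP3 clips, transcript CSVs, and pyopenjtalk user dictionaries). As a rough illustration of how the new patterns behave (filename-level globs only; git's own attribute matching also honors directory paths):

import fnmatch

# The patterns added by this commit; any matching file must go through Git LFS.
new_patterns = ["userdic.csv", "*.md5", "*.wav", "*.mp3", "*.csv", "*.dic"]
for path in ["models/ref/sample.wav", "userdic.csv", "tools/user.dic"]:
    name = path.rsplit("/", 1)[-1]
    hits = [p for p in new_patterns if fnmatch.fnmatch(name, p)]
    print(path, "->", hits if hits else "not matched by the new rules")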
.gitignore
ADDED
@@ -0,0 +1,7 @@
+__pycache__
+GPT_weights
+SoVITS_weights
+TEMP
+s2D2333k.pth
+s2G2333k.pth
+s1bert25hz-5kh-longer-epoch=12-step=369668.ckpt
app.py
CHANGED
@@ -1,7 +1,388 @@
-import
-
-
-
-
-
-
+import os
+import sys
+
+cnhubert_base_path = "GPT_SoVITS/pretrained_models/chinese-hubert-base"
+bert_path = "GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large"  # forward slashes: the Space runs on Linux
+os.environ["version"] = 'v2'
+now_dir = os.path.dirname(os.path.abspath(__file__))
+sys.path.insert(0, now_dir)
+sys.path.insert(0, os.path.join(now_dir, "GPT_SoVITS"))
+sys.path.insert(0, os.path.join(now_dir, "GPT_SoVITS", "text"))
+
+import site
+site_packages_roots = []
+for site_packages_root in site_packages_roots:
+    if os.path.exists(site_packages_root):
+        try:
+            with open("%s/users.pth" % (site_packages_root), "w") as f:
+                f.write(
+                    "%s\n%s/tools\n%s/tools/damo_asr\n%s/GPT_SoVITS\n%s/tools/uvr5"
+                    % (now_dir, now_dir, now_dir, now_dir, now_dir)
+                )
+            break
+        except PermissionError:
+            pass
+
+import re
+import json
+import tempfile
+import logging
+
+import gradio as gr
+import numpy as np
+import librosa, torch, audiosegment
+from transformers import AutoModelForMaskedLM, AutoTokenizer
+
+from GPT_SoVITS.feature_extractor import cnhubert
+cnhubert.cnhubert_base_path = cnhubert_base_path
+from GPT_SoVITS.module.models import SynthesizerTrn
+from GPT_SoVITS.AR.models.t2s_lightning_module import Text2SemanticLightningModule
+from GPT_SoVITS.text import cleaned_text_to_sequence
+from GPT_SoVITS.text.cleaner import clean_text
+from time import time as ttime
+from GPT_SoVITS.module.mel_processing import spectrogram_torch
+from tools.my_utils import load_audio
+
+# import pyopenjtalk
+# cwd = os.getcwd()
+# if os.path.exists(os.path.join(cwd, 'user.dic')):
+#     pyopenjtalk.update_global_jtalk_with_user_dict(os.path.join(cwd, 'user.dic'))
+
+logging.getLogger('httpx').setLevel(logging.WARNING)
+logging.getLogger('httpcore').setLevel(logging.WARNING)
+logging.getLogger('multipart').setLevel(logging.WARNING)
+
+device = "cuda" if torch.cuda.is_available() else "cpu"
+# device = "cpu"
+is_half = False
+
+tokenizer = AutoTokenizer.from_pretrained(bert_path)
+bert_model = AutoModelForMaskedLM.from_pretrained(bert_path)
+if is_half:
+    bert_model = bert_model.half().to(device)
+else:
+    bert_model = bert_model.to(device)
+
+
+def get_bert_feature(text, word2ph):  # BERT feature extraction (not the HuBERT one)
+    with torch.no_grad():
+        inputs = tokenizer(text, return_tensors="pt")
+        for i in inputs:
+            inputs[i] = inputs[i].to(device)  # inputs are long tensors, so precision simply follows bert_model
+        res = bert_model(**inputs, output_hidden_states=True)
+        res = torch.cat(res["hidden_states"][-3:-2], -1)[0].cpu()[1:-1]
+    assert len(word2ph) == len(text)
+    phone_level_feature = []
+    for i in range(len(word2ph)):
+        repeat_feature = res[i].repeat(word2ph[i], 1)
+        phone_level_feature.append(repeat_feature)
+    phone_level_feature = torch.cat(phone_level_feature, dim=0)
+    # if is_half: phone_level_feature = phone_level_feature.half()
+    return phone_level_feature.T
+
+
+loaded_sovits_model = []  # [(path, dict, model)]
+loaded_gpt_model = []
+ssl_model = cnhubert.get_model()
+if is_half:
+    ssl_model = ssl_model.half().to(device)
+else:
+    ssl_model = ssl_model.to(device)
+
+
+def load_model(sovits_path, gpt_path):
+    global ssl_model
+    global loaded_sovits_model
+    global loaded_gpt_model
+    vq_model = None
+    t2s_model = None
+    dict_s2 = None
+    dict_s1 = None
+    hps = None
+    for path, dict_s2_, model in loaded_sovits_model:
+        if path == sovits_path:
+            vq_model = model
+            dict_s2 = dict_s2_
+            break
+    for path, dict_s1_, model in loaded_gpt_model:
+        if path == gpt_path:
+            t2s_model = model
+            dict_s1 = dict_s1_
+            break
+
+    if dict_s2 is None:
+        dict_s2 = torch.load(sovits_path, map_location="cpu")
+    hps = dict_s2["config"]  # read the config even when the weights come from the cache
+
+    if dict_s1 is None:
+        dict_s1 = torch.load(gpt_path, map_location="cpu")
+    config = dict_s1["config"]
+
+    class DictToAttrRecursive(dict):
+        # Subclassing dict keeps `**hps.model` unpacking below working while
+        # also exposing every key as an attribute.
+        def __init__(self, input_dict):
+            super().__init__(input_dict)
+            for key, value in input_dict.items():
+                if isinstance(value, dict):
+                    # recurse when the value is itself a dict
+                    value = DictToAttrRecursive(value)
+                self[key] = value
+                setattr(self, key, value)
+
+    hps = DictToAttrRecursive(hps)
+    hps.model.semantic_frame_rate = "25hz"
+
+    if not vq_model:
+        vq_model = SynthesizerTrn(
+            hps.data.filter_length // 2 + 1,
+            hps.train.segment_size // hps.data.hop_length,
+            n_speakers=hps.data.n_speakers,
+            **hps.model)
+        if is_half:
+            vq_model = vq_model.half().to(device)
+        else:
+            vq_model = vq_model.to(device)
+        vq_model.eval()
+        vq_model.load_state_dict(dict_s2["weight"], strict=False)
+        loaded_sovits_model.append((sovits_path, dict_s2, vq_model))
+    hz = 50
+    max_sec = config['data']['max_sec']
+    if not t2s_model:
+        t2s_model = Text2SemanticLightningModule(config, "ojbk", is_train=False)
+        t2s_model.load_state_dict(dict_s1["weight"])
+        if is_half:
+            t2s_model = t2s_model.half()
+        t2s_model = t2s_model.to(device)
+        t2s_model.eval()
+        total = sum(param.nelement() for param in t2s_model.parameters())
+        print("Number of parameters: %.2fM" % (total / 1e6))
+        loaded_gpt_model.append((gpt_path, dict_s1, t2s_model))
+    return vq_model, ssl_model, t2s_model, hps, config, hz, max_sec
+
+
+def get_spepc(hps, filename):
+    audio = load_audio(filename, int(hps.data.sampling_rate))
+    audio = audio / np.max(np.abs(audio))
+    audio = torch.FloatTensor(audio)
+    print(torch.max(torch.abs(audio)))
+    audio_norm = audio
+    # audio_norm = audio / torch.max(torch.abs(audio))
+    audio_norm = audio_norm.unsqueeze(0)
+    print(torch.max(torch.abs(audio_norm)))
+    spec = spectrogram_torch(audio_norm, hps.data.filter_length, hps.data.sampling_rate,
+                             hps.data.hop_length, hps.data.win_length, center=False)
+    return spec
+
+
+def create_tts_fn(vq_model, ssl_model, t2s_model, hps, config, hz, max_sec):
+    def tts_fn(ref_wav_path, prompt_text, prompt_language, target_phone, text_language, target_text=None):
+        t0 = ttime()
+        prompt_text = prompt_text.strip()
+        with torch.no_grad():
+            wav16k, sr = librosa.load(ref_wav_path, sr=16000)
+            # maxx = 0.95
+            # tmp_max = np.abs(wav16k).max()
+            # alpha = 0.5
+            # wav16k = (wav16k / tmp_max * (maxx * alpha * 32768)) + ((1 - alpha) * 32768) * wav16k
+            # normalization would happen here:
+            # wav16k = wav16k / np.max(np.abs(wav16k))
+            # append 0.3 s of silence
+            wav16k = np.concatenate([wav16k, np.zeros(int(hps.data.sampling_rate * 0.3))])
+            wav16k = torch.from_numpy(wav16k)
+            wav16k = wav16k.float()
+            if is_half:
+                wav16k = wav16k.half().to(device)
+            else:
+                wav16k = wav16k.to(device)
+            print(wav16k.shape)  # 16 kHz reference audio
+            ssl_content = ssl_model.model(wav16k.unsqueeze(0))["last_hidden_state"].transpose(1, 2)  # .float()
+            print(ssl_content.shape)
+            codes = vq_model.extract_latent(ssl_content)
+            print(codes.shape)
+            prompt_semantic = codes[0, 0]
+            t1 = ttime()
+            phones1, word2ph1, norm_text1 = clean_text(prompt_text, prompt_language)
+            phones1 = cleaned_text_to_sequence(phones1)
+            # texts = text.split("\n")
+            audio_opt = []
+            zero_wav = np.zeros(int(hps.data.sampling_rate * 0.3), dtype=np.float16 if is_half else np.float32)
+            phones = get_phone_from_str_list(target_phone, text_language)
+            for phones2 in phones:
+                if len(phones2) == 0:
+                    continue
+                if len(phones2) == 1 and phones2[0] == "":
+                    continue
+                # phones2, word2ph2, norm_text2 = clean_text(text, text_language)
+                print(phones2)
+                phones2 = cleaned_text_to_sequence(phones2)
+                # if prompt_language == "zh": bert1 = get_bert_feature(norm_text1, word2ph1).to(device)
+                bert1 = torch.zeros((1024, len(phones1)), dtype=torch.float16 if is_half else torch.float32).to(device)
+                # if text_language == "zh": bert2 = get_bert_feature(norm_text2, word2ph2).to(device)
+                bert2 = torch.zeros((1024, len(phones2))).to(bert1)
+                bert = torch.cat([bert1, bert2], 1)
+
+                all_phoneme_ids = torch.LongTensor(phones1 + phones2).to(device).unsqueeze(0)
+                bert = bert.to(device).unsqueeze(0)
+                all_phoneme_len = torch.tensor([all_phoneme_ids.shape[-1]]).to(device)
+                prompt = prompt_semantic.unsqueeze(0).to(device)
+                t2 = ttime()
+                idx = 0
+                cnt = 0
+                while idx == 0 and cnt < 2:
+                    with torch.no_grad():
+                        # pred_semantic = t2s_model.model.infer
+                        pred_semantic, idx = t2s_model.model.infer_panel(
+                            all_phoneme_ids,
+                            all_phoneme_len,
+                            prompt,
+                            bert,
+                            # prompt_phone_len=ph_offset,
+                            top_k=config['inference']['top_k'],
+                            early_stop_num=hz * max_sec)
+                    t3 = ttime()
+                    cnt += 1
+                if idx == 0:
+                    # three return values to match the three Gradio outputs below
+                    return "Error: Generation failure: bad zero prediction.", None, None
+                pred_semantic = pred_semantic[:, -idx:].unsqueeze(0)  # one extra unsqueeze is needed here
+                refer = get_spepc(hps, ref_wav_path)  # .to(device)
+                if is_half:
+                    refer = refer.half().to(device)
+                else:
+                    refer = refer.to(device)
+                # audio = vq_model.decode(pred_semantic, all_phoneme_ids, refer).detach().cpu().numpy()[0, 0]
+                # decode without the prompt part
+                audio = vq_model.decode(pred_semantic, torch.LongTensor(phones2).to(device).unsqueeze(0), refer).detach().cpu().numpy()[0, 0]
+                audio_opt.append(audio)
+                audio_opt.append(zero_wav)
+                t4 = ttime()
+                print("%.3f\t%.3f\t%.3f\t%.3f" % (t1 - t0, t2 - t1, t3 - t2, t4 - t3))
+
+        audio = (hps.data.sampling_rate, (np.concatenate(audio_opt, 0) * 32768).astype(np.int16))
+
+        filename = tempfile.mktemp(suffix=".wav", prefix=f"{prompt_text[:8]}_{target_text[:8]}_")
+        audiosegment.from_numpy_array(audio[1], framerate=audio[0]).export(filename, format="WAV")
+        return "Success", audio, filename
+    return tts_fn
+
+
+def get_str_list_from_phone(text, text_language):
+    # Run raw text through g2p to get phoneme lists, then serialize them to a string.
+    # Note: `text` is a paragraph and may contain several sentences;
+    # paragraphs are separated by \n, phonemes by spaces.
+    texts = text.split("\n")
+    phone_list = []
+    for text in texts:
+        phones2, word2ph2, norm_text2 = clean_text(text, text_language)
+        phone_list.append(" ".join(phones2))
+    return "\n".join(phone_list)
+
+
+def get_phone_from_str_list(str_list: str, language: str = 'ja'):
+    # Parse a phoneme string back into phoneme lists.
+    # Sentences are separated by \n, phonemes by spaces.
+    sentences = str_list.split("\n")
+    phones = []
+    for sentence in sentences:
+        phones.append(sentence.split(" "))
+    return phones
+
+
+splits = {",", "。", "?", "!", ",", ".", "?", "!", "~", ":", ":", "—", "…"}  # the ellipsis is not treated specially
+
+
+def split(todo_text):
+    todo_text = todo_text.replace("……", "。").replace("——", ",")
+    if todo_text[-1] not in splits:
+        todo_text += "。"
+    i_split_head = i_split_tail = 0
+    len_text = len(todo_text)
+    todo_texts = []
+    while 1:
+        if i_split_head >= len_text:
+            break  # the text is guaranteed to end with punctuation, so the last segment was already appended
+        if todo_text[i_split_head] in splits:
+            i_split_head += 1
+            todo_texts.append(todo_text[i_split_tail:i_split_head])
+            i_split_tail = i_split_head
+        else:
+            i_split_head += 1
+    return todo_texts
+
+
+def change_reference_audio(prompt_text, transcripts):
+    return transcripts[prompt_text]
+
+
+models = []
+models_info = json.load(open("./models/models_info.json", "r", encoding="utf-8"))
+
+for i, info in models_info.items():
+    title = info['title']
+    cover = info['cover']
+    gpt_weight = info['gpt_weight']
+    sovits_weight = info['sovits_weight']
+    example_reference = info['example_reference']
+    transcripts = {}
+    transcript_path = info["transcript_path"]
+    path = os.path.dirname(transcript_path)
+    with open(transcript_path, 'r', encoding='utf-8') as file:
+        for line in file:
+            line = line.strip().replace("\\", "/")
+            wav, _, _, t = line.split("|")
+            wav = os.path.basename(wav)
+            transcripts[t] = os.path.join(os.path.join(path, "reference_audio"), wav)
+
+    vq_model, ssl_model, t2s_model, hps, config, hz, max_sec = load_model(sovits_weight, gpt_weight)
+
+    models.append(
+        (
+            i,
+            title,
+            cover,
+            transcripts,
+            example_reference,
+            create_tts_fn(
+                vq_model, ssl_model, t2s_model, hps, config, hz, max_sec
+            )
+        )
+    )
+
+with gr.Blocks() as app:
+    gr.Markdown(
+        "# <center> GPT-SoVITS-V2-Gakuen Idolmaster\n"
+    )
+    with gr.Tabs():
+        for (name, title, cover, transcripts, example_reference, tts_fn) in models:
+            with gr.TabItem(name):
+                with gr.Row():
+                    gr.Markdown(
+                        '<div align="center">'
+                        f'<a><strong>{title}</strong></a>'
+                        '</div>')
+                with gr.Row():
+                    with gr.Column():
+                        prompt_text = gr.Dropdown(
+                            label="Transcript of the Reference Audio",
+                            value=example_reference if example_reference in transcripts else list(transcripts.keys())[0],
+                            choices=list(transcripts.keys())
+                        )
+                        inp_ref_audio = gr.Audio(
+                            label="Reference Audio",
+                            type="filepath",
+                            interactive=False,
+                            value=transcripts[example_reference] if example_reference in transcripts else list(transcripts.values())[0]
+                        )
+                        transcripts_state = gr.State(value=transcripts)
+                        prompt_text.change(
+                            fn=change_reference_audio,
+                            inputs=[prompt_text, transcripts_state],
+                            outputs=[inp_ref_audio]
+                        )
+                        prompt_language = gr.State(value="ja")
+                    with gr.Column():
+                        text = gr.Textbox(label="Input Text", value="こんにちは、私はあなたのAIアシスタントです。仲良くしましょうね。")
+                        text_language = gr.Dropdown(
+                            label="Language",
+                            choices=["ja"],
+                            value="ja"
+                        )
+                        clean_button = gr.Button("Clean Text", variant="primary")
+                        inference_button = gr.Button("Generate", variant="primary")
+                        cleaned_text = gr.Textbox(label="Cleaned Text")
+                        output = gr.Audio(label="Output Audio")
+                        output_file = gr.File(label="Output Audio File")
+                        om = gr.Textbox(label="Output Message")
+                        clean_button.click(
+                            fn=get_str_list_from_phone,
+                            inputs=[text, text_language],
+                            outputs=[cleaned_text]
+                        )
+                        inference_button.click(
+                            fn=tts_fn,
+                            inputs=[inp_ref_audio, prompt_text, prompt_language, cleaned_text, text_language, text],
+                            outputs=[om, output, output_file]
+                        )
+
+app.launch(share=True)
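app.py builds one inference tab per entry in ./models/models_info.json, a file that is not part of this diff. Inferring only from the keys the loading loop reads, one entry plausibly looks like the sketch below; every id and path here is hypothetical:

# Hypothetical models_info.json entry, shown as the Python structure app.py
# would get back from json.load(); none of these paths ship with this commit.
example_models_info = {
    "model01": {                                   # tab id passed to gr.TabItem
        "title": "Example Character",              # heading rendered in the tab
        "cover": "models/model01/cover.png",       # read by app.py, not rendered in this UI
        "gpt_weight": "GPT_weights/model01.ckpt",
        "sovits_weight": "SoVITS_weights/model01.pth",
        "example_reference": "transcript of the default reference clip",
        "transcript_path": "models/model01/transcript.list",
    }
}

Each line of the transcript file is pipe-separated (`wav|speaker|lang|text`), and the wav name is resolved under `<transcript dir>/reference_audio/`, which is exactly what the loading loop above does.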
requirements.txt
ADDED
@@ -0,0 +1,36 @@
+numpy==1.23.4
+scipy
+tensorboard
+librosa==0.9.2
+numba==0.56.4
+pytorch-lightning
+gradio
+gradio_client
+ffmpeg-python
+onnxruntime; sys_platform == 'darwin'
+onnxruntime-gpu; sys_platform != 'darwin'
+tqdm
+funasr==1.0.27
+cn2an
+pypinyin
+pyopenjtalk
+g2p_en
+torchaudio
+modelscope==1.10.0
+sentencepiece
+transformers
+chardet
+PyYAML
+psutil
+jieba_fast
+jieba
+LangSegment>=0.2.0
+wordsegment
+rotary_embedding_torch
+pyjyutping
+g2pk2
+ko_pron
+opencc; sys_platform != 'linux'
+opencc==1.1.1; sys_platform == 'linux'
+python_mecab_ko; sys_platform != 'win32'
+audiosegment
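Several requirements use PEP 508 environment markers so one file serves macOS, Linux, and Windows. A quick way to check how a marker resolves (an illustrative sketch; the `packaging` library used here is pip's own dependency, not pinned in this file):

from packaging.markers import Marker

# The marker on the onnxruntime lines above: macOS gets the CPU build,
# every other platform gets onnxruntime-gpu.
marker = Marker("sys_platform == 'darwin'")
print(marker.evaluate({"sys_platform": "darwin"}))  # True  -> onnxruntime
print(marker.evaluate({"sys_platform": "linux"}))   # False -> onnxruntime-gpu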
test.ipynb
ADDED
@@ -0,0 +1,78 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "[{'string': 'ni',\n",
+       " 'pos': '*',\n",
+       " 'pos_group1': '*',\n",
+       " 'pos_group2': '*',\n",
+       " 'pos_group3': '*',\n",
+       " 'ctype': '*',\n",
+       " 'cform': '*',\n",
+       " 'orig': 'ニ',\n",
+       " 'read': 'ニ',\n",
+       " 'pron': 'ニ',\n",
+       " 'acc': 0,\n",
+       " 'mora_size': 1,\n",
+       " 'chain_rule': '*',\n",
+       " 'chain_flag': -1},\n",
+       " {'string': 'ce',\n",
+       " 'pos': '*',\n",
+       " 'pos_group1': '*',\n",
+       " 'pos_group2': '*',\n",
+       " 'pos_group3': '*',\n",
+       " 'ctype': '*',\n",
+       " 'cform': '*',\n",
+       " 'orig': 'セイイ',\n",
+       " 'read': 'セイイ',\n",
+       " 'pron': 'セイイ',\n",
+       " 'acc': 0,\n",
+       " 'mora_size': 3,\n",
+       " 'chain_rule': '*',\n",
+       " 'chain_flag': 1}]"
+      ]
+     },
+     "execution_count": 3,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "import os\n",
+    "import json\n",
+    "import pyopenjtalk\n",
+    "cwd = os.getcwd()\n",
+    "if os.path.exists(os.path.join(cwd,'user.dic')):\n",
+    "    pyopenjtalk.update_global_jtalk_with_user_dict(os.path.join(cwd, 'user.dic'))\n",
+    "pyopenjtalk.run_frontend('nice')"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "vits",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.10.0"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
tools/__init__.py
ADDED
File without changes
tools/asr/config.py
ADDED
@@ -0,0 +1,33 @@
+import os
+
+def check_fw_local_models():
+    '''
+    Check at startup whether Faster Whisper models exist locally.
+    '''
+    model_size_list = [
+        "tiny", "tiny.en",
+        "base", "base.en",
+        "small", "small.en",
+        "medium", "medium.en",
+        "large", "large-v1",
+        "large-v2", "large-v3"]
+    for i, size in enumerate(model_size_list):
+        if os.path.exists(f'tools/asr/models/faster-whisper-{size}'):
+            model_size_list[i] = size + '-local'
+    return model_size_list
+
+asr_dict = {
+    "达摩 ASR (中文)": {
+        'lang': ['zh'],
+        'size': ['large'],
+        'path': 'funasr_asr.py',
+        'precision': ['float32']
+    },
+    "Faster Whisper (多语种)": {
+        'lang': ['auto', 'zh', 'en', 'ja'],
+        'size': check_fw_local_models(),
+        'path': 'fasterwhisper_asr.py',
+        'precision': ['float32', 'float16', 'int8']
+    },
+}
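asr_dict maps each ASR tool's display name to its supported languages, model sizes, precisions, and the script that implements it; this commit ships only the dict and the two scripts. A hedged sketch of how a caller could dispatch on it (the run_asr helper and its wiring are hypothetical, not part of the diff):

import subprocess
import sys

from tools.asr.config import asr_dict

def run_asr(tool_name: str, input_folder: str, output_folder: str,
            model_size: str, language: str, precision: str) -> None:
    # Look up which tools/asr/*.py script implements the selected tool and
    # launch it as a subprocess with the flags both scripts accept.
    script = asr_dict[tool_name]["path"]
    cmd = [
        sys.executable, f"tools/asr/{script}",
        "-i", input_folder,
        "-o", output_folder,
        "-s", model_size,
        "-l", language,
        "-p", precision,
    ]
    subprocess.run(cmd, check=True)

run_asr("Faster Whisper (多语种)", "output/slicer_opt", "output/asr_opt",
        "large-v3", "ja", "float16")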
tools/asr/fasterwhisper_asr.py
ADDED
@@ -0,0 +1,114 @@
+import argparse
+import os
+import traceback
+
+os.environ["HF_ENDPOINT"] = "https://hf-mirror.com"
+os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
+
+import torch
+from faster_whisper import WhisperModel
+from tqdm import tqdm
+
+from tools.asr.config import check_fw_local_models
+
+language_code_list = [
+    "af", "am", "ar", "as", "az",
+    "ba", "be", "bg", "bn", "bo",
+    "br", "bs", "ca", "cs", "cy",
+    "da", "de", "el", "en", "es",
+    "et", "eu", "fa", "fi", "fo",
+    "fr", "gl", "gu", "ha", "haw",
+    "he", "hi", "hr", "ht", "hu",
+    "hy", "id", "is", "it", "ja",
+    "jw", "ka", "kk", "km", "kn",
+    "ko", "la", "lb", "ln", "lo",
+    "lt", "lv", "mg", "mi", "mk",
+    "ml", "mn", "mr", "ms", "mt",
+    "my", "ne", "nl", "nn", "no",
+    "oc", "pa", "pl", "ps", "pt",
+    "ro", "ru", "sa", "sd", "si",
+    "sk", "sl", "sn", "so", "sq",
+    "sr", "su", "sv", "sw", "ta",
+    "te", "tg", "th", "tk", "tl",
+    "tr", "tt", "uk", "ur", "uz",
+    "vi", "yi", "yo", "zh", "yue",
+    "auto"]
+
+def execute_asr(input_folder, output_folder, model_size, language, precision):
+    if '-local' in model_size:
+        model_size = model_size[:-6]
+        model_path = f'tools/asr/models/faster-whisper-{model_size}'
+    else:
+        model_path = model_size
+    if language == 'auto':
+        language = None  # leave the language unset and let the model pick the most probable one
+    print("loading faster whisper model:", model_size, model_path)
+    device = 'cuda' if torch.cuda.is_available() else 'cpu'
+    try:
+        model = WhisperModel(model_path, device=device, compute_type=precision)
+    except Exception:
+        return print(traceback.format_exc())
+
+    input_file_names = os.listdir(input_folder)
+    input_file_names.sort()
+
+    output = []
+    output_file_name = os.path.basename(input_folder)
+
+    for file_name in tqdm(input_file_names):
+        try:
+            file_path = os.path.join(input_folder, file_name)
+            segments, info = model.transcribe(
+                audio          = file_path,
+                beam_size      = 5,
+                vad_filter     = True,
+                vad_parameters = dict(min_silence_duration_ms=700),
+                language       = language)
+            text = ''
+
+            if info.language == "zh":
+                print("Chinese text detected, handing over to FunASR")
+                if "only_asr" not in globals():
+                    # imported lazily so the FunASR models are only downloaded when needed
+                    from tools.asr.funasr_asr import only_asr
+                text = only_asr(file_path)
+
+            if text == '':
+                for segment in segments:
+                    text += segment.text
+            output.append(f"{file_path}|{output_file_name}|{info.language.upper()}|{text}")
+        except Exception:
+            print(traceback.format_exc())
+
+    output_folder = output_folder or "output/asr_opt"
+    os.makedirs(output_folder, exist_ok=True)
+    output_file_path = os.path.abspath(f'{output_folder}/{output_file_name}.list')
+
+    with open(output_file_path, "w", encoding="utf-8") as f:
+        f.write("\n".join(output))
+    print(f"ASR task finished -> annotation file path: {output_file_path}\n")
+    return output_file_path
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument("-i", "--input_folder", type=str, required=True,
+                        help="Path to the folder containing WAV files.")
+    parser.add_argument("-o", "--output_folder", type=str, required=True,
+                        help="Output folder to store transcriptions.")
+    parser.add_argument("-s", "--model_size", type=str, default='large-v3',
+                        choices=check_fw_local_models(),
+                        help="Model Size of Faster Whisper")
+    parser.add_argument("-l", "--language", type=str, default='ja',
+                        choices=language_code_list,
+                        help="Language of the audio files.")
+    parser.add_argument("-p", "--precision", type=str, default='float16', choices=['float16', 'float32', 'int8'],
+                        help="fp16, int8 or fp32")
+
+    cmd = parser.parse_args()
+    output_file_path = execute_asr(
+        input_folder  = cmd.input_folder,
+        output_folder = cmd.output_folder,
+        model_size    = cmd.model_size,
+        language      = cmd.language,
+        precision     = cmd.precision,
+    )
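Both ASR scripts write one `path|folder|LANG|text` line per clip to `<output_folder>/<folder>.list`, the same pipe-separated layout app.py later parses for reference transcripts. A minimal reader for that format (a sketch, not shipped in this commit):

import os

def read_annotations(list_path):
    # Parse the pipe-separated annotation lines written by execute_asr().
    entries = []
    with open(list_path, "r", encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            # maxsplit=3 keeps any "|" that appears inside the transcript text
            wav_path, folder, lang, text = line.split("|", 3)
            entries.append({"wav": wav_path, "folder": folder,
                            "lang": lang, "text": text})
    return entries

for e in read_annotations("output/asr_opt/slicer_opt.list")[:3]:
    print(os.path.basename(e["wav"]), e["lang"], e["text"][:40])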
tools/asr/funasr_asr.py
ADDED
@@ -0,0 +1,79 @@
+# -*- coding:utf-8 -*-
+
+import argparse
+import os
+import traceback
+from tqdm import tqdm
+# from funasr.utils import version_checker
+# version_checker.check_for_update = lambda: None
+from funasr import AutoModel
+
+path_asr  = 'tools/asr/models/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch'
+path_vad  = 'tools/asr/models/speech_fsmn_vad_zh-cn-16k-common-pytorch'
+path_punc = 'tools/asr/models/punc_ct-transformer_zh-cn-common-vocab272727-pytorch'
+path_asr  = path_asr  if os.path.exists(path_asr)  else "iic/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
+path_vad  = path_vad  if os.path.exists(path_vad)  else "iic/speech_fsmn_vad_zh-cn-16k-common-pytorch"
+path_punc = path_punc if os.path.exists(path_punc) else "iic/punc_ct-transformer_zh-cn-common-vocab272727-pytorch"
+
+
+model = AutoModel(
+    model               = path_asr,
+    model_revision      = "v2.0.4",
+    vad_model           = path_vad,
+    vad_model_revision  = "v2.0.4",
+    punc_model          = path_punc,
+    punc_model_revision = "v2.0.4",
+)
+
+def only_asr(input_file):
+    try:
+        text = model.generate(input=input_file)[0]["text"]
+    except Exception:
+        text = ''
+        print(traceback.format_exc())
+    return text
+
+def execute_asr(input_folder, output_folder, model_size, language):
+    input_file_names = os.listdir(input_folder)
+    input_file_names.sort()
+
+    output = []
+    output_file_name = os.path.basename(input_folder)
+
+    for file_name in tqdm(input_file_names):
+        try:
+            file_path = os.path.join(input_folder, file_name)
+            text = model.generate(input=file_path)[0]["text"]
+            output.append(f"{file_path}|{output_file_name}|{language.upper()}|{text}")
+        except Exception:
+            print(traceback.format_exc())
+
+    output_folder = output_folder or "output/asr_opt"
+    os.makedirs(output_folder, exist_ok=True)
+    output_file_path = os.path.abspath(f'{output_folder}/{output_file_name}.list')
+
+    with open(output_file_path, "w", encoding="utf-8") as f:
+        f.write("\n".join(output))
+    print(f"ASR task finished -> annotation file path: {output_file_path}\n")
+    return output_file_path
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument("-i", "--input_folder", type=str, required=True,
+                        help="Path to the folder containing WAV files.")
+    parser.add_argument("-o", "--output_folder", type=str, required=True,
+                        help="Output folder to store transcriptions.")
+    parser.add_argument("-s", "--model_size", type=str, default='large',
+                        help="Model Size of FunASR is Large")
+    parser.add_argument("-l", "--language", type=str, default='zh', choices=['zh'],
+                        help="Language of the audio files.")
+    parser.add_argument("-p", "--precision", type=str, default='float16', choices=['float16', 'float32'],
+                        help="fp16 or fp32")  # not wired up yet
+
+    cmd = parser.parse_args()
+    execute_asr(
+        input_folder  = cmd.input_folder,
+        output_folder = cmd.output_folder,
+        model_size    = cmd.model_size,
+        language      = cmd.language,
+    )
tools/asr/models/.gitignore
ADDED
@@ -0,0 +1,2 @@
+*
+!.gitignore
tools/cmd-denoise.py
ADDED
@@ -0,0 +1,33 @@
+import os, argparse
+import traceback
+
+from modelscope.pipelines import pipeline
+from modelscope.utils.constant import Tasks
+from tqdm import tqdm
+
+path_denoise = 'tools/denoise-model/speech_frcrn_ans_cirm_16k'
+path_denoise = path_denoise if os.path.exists(path_denoise) else "damo/speech_frcrn_ans_cirm_16k"
+ans = pipeline(Tasks.acoustic_noise_suppression, model=path_denoise)
+
+def execute_denoise(input_folder, output_folder):
+    os.makedirs(output_folder, exist_ok=True)
+    # print(input_folder)
+    # print(list(os.listdir(input_folder).sort()))
+    for name in tqdm(os.listdir(input_folder)):
+        try:
+            ans("%s/%s" % (input_folder, name), output_path='%s/%s' % (output_folder, name))
+        except Exception:
+            traceback.print_exc()
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument("-i", "--input_folder", type=str, required=True,
+                        help="Path to the folder containing WAV files.")
+    parser.add_argument("-o", "--output_folder", type=str, required=True,
+                        help="Output folder to store denoised audio.")
+    parser.add_argument("-p", "--precision", type=str, default='float16', choices=['float16', 'float32'],
+                        help="fp16 or fp32")  # not wired up yet
+    cmd = parser.parse_args()
+    execute_denoise(
+        input_folder  = cmd.input_folder,
+        output_folder = cmd.output_folder,
+    )
tools/denoise-model/.gitignore
ADDED
@@ -0,0 +1,2 @@
+*
+!.gitignore
tools/i18n/i18n.py
ADDED
@@ -0,0 +1,30 @@
+import json
+import locale
+import os
+
+I18N_JSON_DIR: os.PathLike = os.path.join(os.path.dirname(os.path.relpath(__file__)), 'locale')
+
+def load_language_list(language):
+    with open(os.path.join(I18N_JSON_DIR, f"{language}.json"), "r", encoding="utf-8") as f:
+        language_list = json.load(f)
+    return language_list
+
+class I18nAuto:
+    def __init__(self, language=None):
+        if language in ["Auto", None]:
+            language = locale.getdefaultlocale()[0]
+            # getlocale can't identify the system's language ((None, None))
+        if not os.path.exists(os.path.join(I18N_JSON_DIR, f"{language}.json")):
+            language = "en_US"
+        self.language = language
+        self.language_map = load_language_list(language)
+
+    def __call__(self, key):
+        return self.language_map.get(key, key)
+
+    def __repr__(self):
+        return "Use Language: " + self.language
+
+if __name__ == "__main__":
+    i18n = I18nAuto(language='en_US')
+    print(i18n)
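I18nAuto falls back to the lookup key itself when a translation is missing, so untranslated UI strings still render. For instance, with the en_US.json shipped below:

from tools.i18n.i18n import I18nAuto

i18n = I18nAuto(language="en_US")
print(i18n("中文"))       # -> "Chinese"  (key present in en_US.json)
print(i18n("not-a-key"))  # -> "not-a-key" (missing keys pass through unchanged)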
tools/i18n/locale/en_US.json
ADDED
@@ -0,0 +1,172 @@
+{
+    "(1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;": "(1)MDX-Net(onnx_dereverb): Best choice for dual-channel reverberation, cannot remove single-channel reverberation;",
+    "(234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。": "(234)DeEcho: Removes delay effects. Aggressive mode removes more thoroughly than Normal mode. DeReverb additionally removes reverberation; it can remove mono reverberation, but does not fully clean heavily high-frequency plate reverberation.",
+    "*GPT模型列表": "*GPT models list",
+    "*SoVITS模型列表": "*SoVITS models list",
+    "*实验/模型名": "*Experiment/model name",
+    "*文本标注文件": "*Text labelling file",
+    "*训练集音频文件目录": "*Audio dataset folder",
+    "*请上传并填写参考信息": "*Please upload and fill reference information",
+    "*请填写需要合成的目标文本和语种模式": "*Please fill in the target text and language mode for synthesis",
+    ".list标注文件的路径": ".list annotation file path",
+    "0-前置数据集获取工具": "0-Fetch dataset",
+    "0a-UVR5人声伴奏分离&去混响去延迟工具": "0a-UVR5 webui (for vocal separation, deecho, dereverb and denoise)",
+    "0b-语音切分工具": "0b-Audio slicer",
+    "0bb-语音降噪工具": "0bb-Voice denoiser",
+    "0c-中文批量离线ASR工具": "0c-Chinese ASR tool",
+    "0d-语音文本校对标注工具": "0d-Speech-to-text proofreading tool",
+    "1-GPT-SoVITS-TTS": "1-GPT-SoVITS-TTS",
+    "1A-训练集格式化工具": "1A-Dataset formatting",
+    "1Aa-文本内容": "1Aa-Text",
+    "1Aabc-训练集格式化一键三连": "1Aabc-One-click formatting",
+    "1Ab-SSL自监督特征提取": "1Ab-SSL self-supervised feature extraction",
+    "1Ac-语义token提取": "1Ac-Semantics token extraction",
+    "1B-微调训练": "1B-Fine-tuned training",
+    "1Ba-SoVITS训练。用于分享的模型文件输出在SoVITS_weights下。": "1Ba-SoVITS training. The model files for sharing are output under SoVITS_weights.",
+    "1Bb-GPT训练。用于分享的模型文件输出在GPT_weights下。": "1Bb-GPT training. The model files for sharing are output under GPT_weights.",
+    "1C-推理": "1C-Inference",
+    "1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;": "1. The DeEcho-DeReverb model's processing time is nearly twice that of the other two DeEcho models.",
+    "1、保留人声:不带和声的音频选这个,对主人声保留比HP5更好。内置HP2和HP3两个模型,HP3可能轻微漏伴奏但对主人声保留比HP2稍微好一丁点;": "1. Preserve Vocals: Choose this option for audio without harmonies, as it retains the main vocal better than HP5. This option includes two built-in models, HP2 and HP3. HP3 may slightly leak accompaniment but retains the main vocal slightly better than HP2;",
+    "2-GPT-SoVITS-变声": "2-GPT-SoVITS-Voice Changer",
+    "2、MDX-Net-Dereverb模型挺慢的;": "2. The MDX-Net-Dereverb model is quite slow;",
+    "2、仅保留主人声:带和声的音频选这个,对主人声可能有削弱。内置HP5一个模型;": "2. Keep Only Main Vocal: Choose this option for audio with harmonies, as it may weaken the main vocal. Includes one built-in HP5 model;",
+    "3、个人推荐的最干净的配置是先MDX-Net再DeEcho-Aggressive。": "3. Personal recommendation for the cleanest configuration: MDX-Net first, then DeEcho-Aggressive.",
+    "3、去混响、去延迟模型(by FoxJoy):": "3. Reverberation and delay removal models (by FoxJoy):",
+    "ASR 模型": "ASR model",
+    "ASR 模型尺寸": "ASR model size",
+    "数据类型精度": "Computing precision",
+    "ASR 语言设置": "ASR language",
+    "ASR进程输出信息": "ASR output log",
+    "GPT模型列表": "GPT weight list",
+    "GPT训练进程输出信息": "GPT training output log",
+    "GPU卡号,只能填1个整数": "GPU number, can only input ONE integer",
+    "GPU卡号以-分割,每个卡号一个进程": "GPU numbers separated by -, one process per GPU",
+    "SSL进程输出信息": "SSL output log",
+    "SoVITS模型列表": "SoVITS weight list",
+    "SoVITS训练进程输出信息": "SoVITS training output log",
+    "TTS推理WebUI进程输出信息": "TTS inference webui output log",
+    "TTS推理进程已关闭": "TTS inference process closed",
+    "TTS推理进程已开启": "TTS inference process opened",
+    "UVR5已关闭": "UVR5 closed",
+    "UVR5已开启": "UVR5 opened",
+    "UVR5进程输出信息": "UVR5 process output log",
+    "alpha_mix:混多少比例归一化后音频进来": "alpha_mix: proportion of normalized audio merged into dataset",
+    "gpt采样参数(无参考文本时不要太低。不懂就用默认):": "GPT sampling parameters (not too low when there's no reference text; use defaults if unsure):",
+    "hop_size:怎么算音量曲线,越小精度越大计算量越高(不是精度越大效果越好)": "hop_size: how the volume curve is computed; smaller values give higher precision at a higher computational cost (higher precision does not mean better results)",
+    "max:归一化后最大值多少": "max: peak level after normalization",
+    "max_sil_kept:切完后静音最多留多长": "max_sil_kept: maximum length of silence kept after slicing",
+    "min_interval:最短切割间隔": "min_interval: minimum interval for audio cutting",
+    "min_length:每段最小多长,如果第一段太短一直和后面段连起来直到超过这个值": "min_length: the minimum length of each segment; if the first segment is too short, it is concatenated with the following segments until it exceeds this value",
+    "temperature": "temperature",
+    "threshold:音量小于这个值视作静音的备选切割点": "threshold: volume below this value is treated as a candidate silence cut point",
+    "top_k": "top_k",
+    "top_p": "top_p",
+    "一键三连进程输出信息": "One-click formatting output",
+    "不切": "No slice",
+    "中文": "Chinese",
+    "中文教程文档:https://www.yuque.com/baicaigongchang1145haoyuangong/ib3g1e": "Chinese tutorial: https://www.yuque.com/baicaigongchang1145haoyuangong/ib3g1e",
+    "中英混合": "Chinese-English Mixed",
+    "也可批量输入音频文件, 二选一, 优先读文件夹": "Multiple audio files can also be imported. If a folder path exists, this input is ignored.",
+    "人声伴奏分离批量处理, 使用UVR5模型。": "Batch processing for vocal and instrumental separation, using the UVR5 model.",
+    "人声提取激进程度": "Vocal extraction aggressiveness",
+    "伴奏人声分离&去混响&去回声": "Vocals/Accompaniment Separation & Reverberation Removal",
+    "使用无参考文本模式时建议使用微调的GPT,听不清参考音频说的啥(不晓得写啥)可以开,开启后无视填写的参考文本。": "When using no-reference-text mode, a fine-tuned GPT is recommended. Enable this if the reference audio is unclear (and you don't know what to write); once enabled, the reference text you entered is ignored.",
+    "保存频率save_every_epoch": "Save frequency (save_every_epoch):",
+    "凑50字一切": "Cut per 50 characters",
+    "凑四句一切": "Slice once every 4 sentences",
+    "切分后文本": "Text after slicing",
+    "切分后的子音频的输出根目录": "Audio slicer output folder",
+    "切割使用的进程数": "CPU threads used for audio slicing",
+    "刷新模型路径": "Refresh model paths",
+    "前端处理后的文本(每句):": "Processed text from the frontend (per sentence):",
+    "去混响/去延迟,附:": "Dereverberation/Delay Removal, including:",
+    "参考音频在3~10秒范围外,请更换!": "Reference audio is outside the 3-10 second range; please choose another one!",
+    "参考音频的文本": "Text for reference audio",
+    "参考音频的语种": "Language for reference audio",
+    "合成语音": "Start inference",
+    "合格的文件夹路径格式举例: E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华测试样例(去文件管理器地址栏拷就行了)。": "An example of a valid folder path format: E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华测试样例 (simply copy it from the file manager's address bar).",
+    "后续将支持转音素、手工修改音素、语音合成分步执行。": "Step-by-step phoneme conversion, manual phoneme editing, and staged speech synthesis will be supported later.",
+    "填切割后音频所在目录!读取的音频文件完整路径=该目录-拼接-list文件里波形对应的文件名(不是全路径)。如果留空则使用.list文件里的绝对全路径。": "Fill in the directory of the sliced audio! The full path of an audio file = this directory + the filename in the .list file (not the full path). If left blank, the absolute paths in the .list file are used.",
+    "多语种混合": "Multilingual Mixed",
+    "实际输入的参考文本:": "Actual Input Reference Text:",
+    "实际输入的目标文本(切句后):": "Actual Input Target Text (after sentence segmentation):",
+    "实际输入的目标文本(每句):": "Actual Input Target Text (per sentence):",
+    "实际输入的目标文本:": "Actual Input Target Text:",
+    "导出文件格式": "Export file format",
+    "开启GPT训练": "Start GPT training",
+    "开启SSL提取": "Start SSL extracting",
+    "开启SoVITS训练": "Start SoVITS training",
+    "开启一键三连": "Start one-click formatting",
+    "开启文本获取": "Start speech-to-text",
+    "开启无参考文本模式。不填参考文本亦相当于开启。": "Enable no-reference-text mode. Leaving 'Text for reference audio' empty also enables it.",
+    "开启离线批量ASR": "Start batch ASR",
+    "开启语义token提取": "Start semantics token extraction",
+    "开启语音切割": "Start audio slicer",
+    "开启语音降噪": "Start voice denoiser",
+    "怎么切": "How to slice the sentence",
+    "总训练轮数total_epoch": "Total training epochs (total_epoch):",
+    "总训练轮数total_epoch,不建议太高": "Total epochs; a very high value is not recommended",
+    "打标工具WebUI已关闭": "Proofreading tool webui closed",
+    "打标工具WebUI已开启": "Proofreading tool webui opened",
+    "打标工具进程输出信息": "Proofreading tool output log",
+    "指定输出主人声文件夹": "Specify the output folder for vocals:",
+    "指定输出非主人声文件夹": "Specify the output folder for accompaniment:",
+    "按中文句号。切": "Slice by Chinese punct",
+    "按标点符号切": "Slice by every punct",
+    "按英文句号.切": "Slice by English punct",
+    "文本切分工具。太长的文本合成出来效果不一定好,所以太长建议先切。合成会根据文本的换行分开合成再拼起来。": "Text slicer tool. Long text does not always synthesize well, so it is advised to slice it first. Synthesis runs per line of text and the pieces are then concatenated.",
+    "文本模块学习率权重": "Text model learning rate weighting",
+    "文本进程输出信息": "Text processing output",
+    "施工中,请静候佳音": "Under construction, please wait",
+    "日文": "Japanese",
+    "日英混合": "Japanese-English Mixed",
+    "是否仅保存最新的ckpt文件以节省硬盘空间": "Save only the latest '.ckpt' file to save disk space:",
+    "是否在每次保存时间点将最终小模型保存至weights文件夹": "Save a small final model to the 'weights' folder at each save point:",
+    "是否开启TTS推理WebUI": "Open TTS inference WebUI",
+    "是否开启UVR5-WebUI": "Open UVR5-WebUI",
+    "是否开启dpo训练选项(实验性)": "Enable DPO training (experimental)",
+    "是否开启打标WebUI": "Open labelling WebUI",
+    "是否直接对上次合成结果调整语速。防止随机性。": "Whether to adjust the speech rate of the last synthesis result directly, to avoid randomness.",
+    "显卡信息": "GPU Information",
+    "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. <br>如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录<b>LICENSE</b>.": "This software is open source under the MIT license. The author has no control over the software; users of the software, and those who distribute audio exported by it, bear full responsibility. <br>If you do not accept these terms, you may not use or reference any code or files within the software package. See <b>LICENSE</b> in the root directory for details.",
+    "模型": "Model",
+    "模型分为三类:": "Models are categorized into three types:",
+    "模型切换": "Model switch",
+    "每张显卡的batch_size": "Batch size per GPU:",
+    "终止ASR进程": "Stop ASR task",
+    "终止GPT训练": "Stop GPT training",
+    "终止SSL提取进程": "Stop SSL extraction",
+    "终止SoVITS训练": "Stop SoVITS training",
+    "终止一键三连": "Stop one-click formatting",
+    "终止文本获取进程": "Stop speech-to-text",
+    "终止语义token提取进程": "Stop semantics token extraction",
+    "终止语音切割": "Stop audio cutting",
+    "终止语音降噪进程": "Stop voice denoising",
+    "英文": "English",
+    "语义token提取进程输出信息": "Semantics token extraction output log",
+    "语速": "Speech rate",
+    "语速调整,高为更快": "Adjust speech rate; higher is faster",
+    "语音切割进程输出信息": "Audio slicer output log",
+    "语音降噪进程输出信息": "Voice denoiser output log",
+    "请上传3~10秒内参考音频,超过会报错!": "Please upload a reference audio within the 3-10 second range; exceeding it will raise an error!",
+    "请输入有效文本": "Please enter valid text.",
+    "转换": "Convert",
+    "输入待处理音频文件夹路径": "Enter the path of the audio folder to be processed:",
+    "输入文件夹路径": "Input folder path",
+    "输出logs/实验名目录下应有23456开头的文件和文件夹": "The output folder (logs/{experiment name}) should contain files and folders whose names start with 23456.",
+    "输出信息": "Output information",
+    "输出文件夹路径": "Output folder path",
+    "输出的语音": "Inference Result",
+    "选择训练完存放在SoVITS_weights和GPT_weights下的模型。默认的一个是底模,体验5秒Zero Shot TTS用。": "Choose models stored under SoVITS_weights and GPT_weights after training. The defaults are base models for trying 5-second zero-shot TTS.",
+    "降噪结果输出文件夹": "Denoised results output folder",
+    "降噪音频文件输入文件夹": "Denoising audio input folder",
+    "需要合成的切分前文本": "Inference text that needs to be sliced",
+    "需要合成的文本": "Inference text",
+    "需要合成的语种": "Inference text language",
+    "音频自动切分输入路径,可文件可文件夹": "Audio slicer input (file or folder)",
+    "预训练的GPT模型路径": "Pretrained GPT model path",
+    "预训练的SSL模型路径": "Pretrained SSL model path",
+    "预训练的SoVITS-D模型路径": "Pretrained SoVITS-D model path",
+    "预训练的SoVITS-G模型路径": "Pretrained SoVITS-G model path",
+    "预训练的中文BERT模型路径": "Pretrained Chinese BERT model path"
+}
tools/i18n/locale/es_ES.json
ADDED
@@ -0,0 +1,172 @@
{
"(1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;": "(1)MDX-Net (onnx_dereverb): reverberación estéreo, la mejor opción; no puede eliminar reverberación mono",
"(234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。": "(234)DeEcho: Eliminar el efecto de retardo. Aggressive elimina más que Normal, DeReverb elimina reverberación adicional, puede eliminar reverberación mono, pero no limpia bien la reverberación de placa de alta frecuencia",
"*GPT模型列表": "*Lista de modelos GPT",
"*SoVITS模型列表": "*Lista de modelos SoVITS",
"*实验/模型名": "*Nombre del experimento/modelo",
"*文本标注文件": "*Archivo de etiquetado de texto",
"*训练集音频文件目录": "*Directorio de archivos de audio de entrenamiento",
"*请上传并填写参考信息": "*Por favor, suba y complete la información de referencia",
"*请填写需要合成的目标文本和语种模式": "*Por favor, complete el texto objetivo a sintetizar y el modo de idioma",
".list标注文件的路径": "Ruta del archivo de anotación .list",
"0-前置数据集获取工具": "0-Herramienta de obtención de conjunto de datos previo",
"0a-UVR5人声伴奏分离&去混响去延迟工具": "0a-Herramienta de separación de voz y acompañamiento UVR5 y eliminación de reverberación y retardo",
"0b-语音切分工具": "0b-Herramienta de división de voz",
"0bb-语音降噪工具": "0bb-Herramienta de reducción de ruido de voz",
"0c-中文批量离线ASR工具": "0c-Herramienta de ASR en lote fuera de línea en chino",
"0d-语音文本校对标注工具": "0d-Herramienta de corrección y etiquetado de texto de voz",
"1-GPT-SoVITS-TTS": "1-GPT-SoVITS-TTS",
"1A-训练集格式化工具": "1A-Herramienta de formateo del conjunto de datos de entrenamiento",
"1Aa-文本内容": "1Aa-Contenido del texto",
"1Aabc-训练集格式化一键三连": "1Aabc-Formateo del conjunto de datos de entrenamiento en un solo paso",
"1Ab-SSL自监督特征提取": "1Ab-Extracción de características auto-supervisada SSL",
"1Ac-语义token提取": "1Ac-Extracción de tokens semánticos",
"1B-微调训练": "1B-Entrenamiento de ajuste fino",
"1Ba-SoVITS训练。用于分享的模型文件输出在SoVITS_weights下。": "1Ba-Entrenamiento de SoVITS. Los archivos de modelo para compartir se encuentran en SoVITS_weights.",
"1Bb-GPT训练。用于分享的模型文件输出在GPT_weights下。": "1Bb-Entrenamiento de GPT. Los archivos de modelo para compartir se encuentran en GPT_weights.",
"1C-推理": "1C-Inferencia",
"1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;": "1. El modelo DeEcho-DeReverb tarda casi el doble que los otros dos modelos DeEcho",
"1、保留人声:不带和声的音频选这个,对主人声保留比HP5更好。内置HP2和HP3两个模型,HP3可能轻微漏伴奏但对主人声保留比HP2稍微好一丁点;": "1. Retener voz principal: seleccione este para audio sin coros, retiene mejor la voz principal que HP5. Incluye dos modelos, HP2 y HP3; HP3 puede filtrar ligeramente el acompañamiento pero retiene mejor la voz principal que HP2",
"2-GPT-SoVITS-变声": "2-GPT-SoVITS-Cambio de voz",
"2、MDX-Net-Dereverb模型挺慢的;": "2. El modelo MDX-Net-Dereverb es bastante lento",
"2、仅保留主人声:带和声的音频选这个,对主人声可能有削弱。内置HP5一个模型;": "2. Solo retener voz principal: seleccione este para audio con coros, puede debilitar la voz principal. Incluye un modelo HP5",
"3、个人推荐的最干净的配置是先MDX-Net再DeEcho-Aggressive。": "3. La configuración más limpia recomendada es primero MDX-Net, luego DeEcho-Aggressive",
"3、去混响、去延迟模型(by FoxJoy):": "3. Modelos de eliminación de reverberación y retardo (por FoxJoy)",
"ASR 模型": "Modelo ASR",
"ASR 模型尺寸": "Tamaño del modelo ASR",
"数据类型精度": "precisión del tipo de datos",
"ASR 语言设置": "Configuración del idioma ASR",
"ASR进程输出信息": "Información de salida del proceso ASR",
"GPT模型列表": "Lista de modelos GPT",
"GPT训练进程输出信息": "Información de salida del proceso de entrenamiento de GPT",
"GPU卡号,只能填1个整数": "Número de tarjeta GPU, solo se puede ingresar un número entero",
"GPU卡号以-分割,每个卡号一个进程": "Número de tarjeta GPU separado por '-', cada número de tarjeta es un proceso",
"SSL进程输出信息": "Información de salida del proceso SSL",
"SoVITS模型列表": "Lista de modelos SoVITS",
"SoVITS训练进程输出信息": "Información de salida del proceso de entrenamiento de SoVITS",
"TTS推理WebUI进程输出信息": "Información de salida del proceso de interfaz web de inferencia TTS",
"TTS推理进程已关闭": "Proceso de inferencia TTS cerrado",
"TTS推理进程已开启": "Proceso de inferencia TTS iniciado",
"UVR5已关闭": "UVR5 está deshabilitado",
"UVR5已开启": "UVR5 está habilitado",
"UVR5进程输出信息": "Información de salida del proceso UVR5",
"alpha_mix:混多少比例归一化后音频进来": "alpha_mix: proporción de mezcla de audio normalizado que entra",
"gpt采样参数(无参考文本时不要太低。不懂就用默认):": "Parámetros de muestreo de GPT (no demasiado bajos cuando no hay texto de referencia. Use los valores por defecto si no está seguro):",
"hop_size:怎么算音量曲线,越小精度越大计算量越高(不是精度越大效果越好)": "hop_size: cómo calcular la curva de volumen, cuanto más pequeño, mayor precisión pero mayor carga computacional (mayor precisión no significa mejor rendimiento)",
"max:归一化后最大值多少": "max: valor máximo después de la normalización",
"max_sil_kept:切完后静音最多留多长": "max_sil_kept: duración máxima del silencio después del corte",
"min_interval:最短切割间隔": "min_interval: intervalo mínimo de corte",
"min_length:每段最小多长,如果第一段太短一直和后面段连起来直到超过这个值": "min_length: longitud mínima de cada segmento; si el primer segmento es demasiado corto, se une al siguiente hasta superar este valor",
"temperature": "temperatura",
"threshold:音量小于这个值视作静音的备选切割点": "umbral: puntos de corte alternativos considerados como silencio si el volumen es menor que este valor",
"top_k": "top_k",
"top_p": "top_p",
"一键三连进程输出信息": "Información de salida del proceso de triple acción",
"不切": "No cortar",
"中文": "Chino",
"中文教程文档:https://www.yuque.com/baicaigongchang1145haoyuangong/ib3g1e": "Documentación del tutorial en chino: https://www.yuque.com/baicaigongchang1145haoyuangong/ib3g1e",
"中英混合": "Chino e inglés mezclados",
"也可批量输入音频文件, 二选一, 优先读文件夹": "También se pueden ingresar archivos de audio por lotes, seleccionar uno, prioridad para leer carpetas",
"人声伴奏分离批量处理, 使用UVR5模型。": "Procesamiento por lotes de separación de voz y acompañamiento utilizando el modelo UVR5",
"人声提取激进程度": "Nivel de agresividad en la extracción de voz",
"伴奏人声分离&去混响&去回声": "Separación de acompañamiento y voz principal y eliminación de reverberación y eco",
"使用无参考文本模式时建议使用微调的GPT,听不清参考音频说的啥(不晓得写啥)可以开,开启后无视填写的参考文本。": "Se recomienda usar un GPT ajustado en modo sin texto de referencia; habilítelo si no puede entender el audio de referencia (si no sabe qué escribir). Una vez habilitado, ignorará el texto de referencia ingresado.",
"保存频率save_every_epoch": "Frecuencia de guardado (cada epoch)",
"凑50字一切": "Todo para alcanzar las 50 palabras",
"凑四句一切": "Completa cuatro oraciones para rellenar todo",
"切分后文本": "Texto después de la división",
"切分后的子音频的输出根目录": "Directorio raíz de salida de los sub-audios después de la división",
"切割使用的进程数": "Número de procesos utilizados para la división",
"刷新模型路径": "Actualizar la ruta del modelo",
"前端处理后的文本(每句):": "Texto después del procesamiento previo (por frase):",
"去混响/去延迟,附:": "Eliminación de reverberación/retardo, incluye:",
"参考音频在3~10秒范围外,请更换!": "El audio de referencia está fuera del rango de 3 a 10 segundos, ¡por favor cámbielo!",
"参考音频的文本": "Texto de referencia del audio",
"参考音频的语种": "Idioma del audio de referencia",
"合成语音": "Síntesis de voz",
"合格的文件夹路径格式举例: E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华测试样例(去文件管理器地址栏拷就行了)。": "Ejemplo de formato de ruta de carpeta válida: E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华测试样例 (simplemente copie desde la barra de direcciones del administrador de archivos).",
"后续将支持转音素、手工修改音素、语音合成分步执行。": "En el futuro se admitirá la conversión de fonemas, la modificación manual de fonemas y la síntesis de voz paso a paso.",
"填切割后音频所在目录!读取的音频文件完整路径=该目录-拼接-list文件里波形对应的文件名(不是全路径)。如果留空则使用.list文件里的绝对全路径。": "Ingrese el directorio donde se encuentran los audios después de la división. La ruta completa de los archivos de audio leídos = este directorio + nombre de archivo correspondiente en el archivo .list (no la ruta completa). Si se deja en blanco, se utilizará la ruta completa del archivo .list.",
"多语种混合": "Mezcla de varios idiomas",
"实际输入的参考文本:": "Texto de referencia realmente ingresado:",
"实际输入的目标文本(切句后):": "Texto objetivo realmente ingresado (después de dividir en frases):",
"实际输入的目标文本(每句):": "Texto objetivo realmente ingresado (por frase):",
"实际输入的目标文本:": "Texto objetivo realmente ingresado:",
"导出文件格式": "Formato de archivo de exportación",
"开启GPT训练": "Iniciar entrenamiento de GPT",
"开启SSL提取": "Habilitar la extracción SSL",
"开启SoVITS训练": "Iniciar entrenamiento de SoVITS",
"开启一键三连": "Habilitar un solo paso de formateo",
"开启文本获取": "Habilitar la obtención de texto",
"开启无参考文本模式。不填参考文本亦相当于开启。": "Habilitar el modo sin texto de referencia. No llenar el texto de referencia también lo habilita.",
"开启离线批量ASR": "Habilitar ASR en lote fuera de línea",
"开启语义token提取": "Habilitar la extracción de tokens semánticos",
"开启语音切割": "Habilitar la división de voz",
"开启语音降噪": "Habilitar la reducción de ruido de voz",
"怎么切": "Cómo cortar",
"总训练轮数total_epoch": "Número total de épocas de entrenamiento",
"总训练轮数total_epoch,不建议太高": "Número total de épocas de entrenamiento, no se recomienda demasiado alto",
"打标工具WebUI已关闭": "Interfaz web de la herramienta de etiquetado cerrada",
"打标工具WebUI已开启": "Interfaz web de la herramienta de etiquetado iniciada",
"打标工具进程输出信息": "Información de salida del proceso de la herramienta de etiquetado",
"指定输出主人声文件夹": "Especificar carpeta de salida de voz principal",
"指定输出非主人声文件夹": "Especificar carpeta de salida de no voz principal",
"按中文句号。切": "Cortar según puntos en chino",
"按标点符号切": "Cortar según los signos de puntuación",
"按英文句号.切": "Cortar por puntos en inglés.",
"文本切分工具。太长的文本合成出来效果不一定好,所以太长建议先切。合成会根据文本的换行分开合成再拼起来。": "Herramienta de división de texto. El resultado de la síntesis puede no ser bueno para textos demasiado largos, por lo que se recomienda dividirlos primero. La síntesis se realiza separando el texto según los saltos de línea y luego uniendo los fragmentos.",
"文本模块学习率权重": "Peso de la tasa de aprendizaje del módulo de texto",
"文本进程输出信息": "Información de salida del proceso de obtención de texto",
"施工中,请静候佳音": "En construcción, por favor espere pacientemente",
"日文": "Japonés",
"日英混合": "Mezcla de japonés e inglés",
"是否仅保存最新的ckpt文件以节省硬盘空间": "¿Guardar solo el último archivo ckpt para ahorrar espacio en disco?",
"是否在每次保存时间点将最终小模型保存至weights文件夹": "¿Guardar el modelo final pequeño en la carpeta de pesos en cada punto de guardado?",
"是否开启TTS推理WebUI": "¿Habilitar la interfaz web de inferencia TTS?",
"是否开启UVR5-WebUI": "¿Habilitar UVR5-WebUI?",
"是否开启dpo训练选项(实验性)": "¿Habilitar la opción de entrenamiento dpo (experimental)?",
"是否开启打标WebUI": "¿Habilitar la interfaz web de etiquetado?",
"是否直接对上次合成结果调整语速。防止随机性。": "¿Si se ajusta directamente la velocidad de habla del último resultado de síntesis para evitar aleatoriedad?",
"显卡信息": "Información de la tarjeta gráfica",
"本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. <br>如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录<b>LICENSE</b>.": "Este software es de código abierto bajo la licencia MIT. El autor no tiene control sobre el software. El usuario que lo utilice o distribuya, y el que genere sonidos a partir del software, asume toda la responsabilidad. <br>Si no acepta estos términos, no puede utilizar ni hacer referencia a ningún código o archivo dentro del paquete de software. Consulte el archivo <b>LICENSE</b> en el directorio raíz para obtener más detalles.",
"模型": "Modelo",
"模型分为三类:": "Los modelos se dividen en tres categorías:",
"模型切换": "Cambio de modelo",
"每张显卡的batch_size": "Tamaño de lote por tarjeta gráfica",
"终止ASR进程": "Terminar el proceso ASR",
"终止GPT训练": "Detener entrenamiento de GPT",
"终止SSL提取进程": "Terminar el proceso de extracción SSL",
"终止SoVITS训练": "Detener entrenamiento de SoVITS",
"终止一键三连": "Terminar el proceso de un solo paso de formateo",
"终止文本获取进程": "Terminar el proceso de obtención de texto",
"终止语义token提取进程": "Terminar el proceso de extracción de tokens semánticos",
"终止语音切割": "Terminar la división de voz",
"终止语音降噪进程": "Terminar el proceso de reducción de ruido de voz",
"英文": "Inglés",
"语义token提取进程输出信息": "Información de salida del proceso de extracción de tokens semánticos",
"语速": "Velocidad de habla",
"语速调整,高为更快": "Ajustar la velocidad de habla, más alta para más rápido",
"语音切割进程输出信息": "Información de salida del proceso de división de voz",
"语音降噪进程输出信息": "Información de salida del proceso de reducción de ruido de voz",
"请上传3~10秒内参考音频,超过会报错!": "Por favor, suba un audio de referencia de entre 3 y 10 segundos, ¡más de eso causará un error!",
"请输入有效文本": "Por favor, introduzca un texto válido",
"转换": "Convertir",
"输入待处理音频文件夹路径": "Ingrese la ruta de la carpeta de audio a procesar",
"输入文件夹路径": "Ingrese la ruta de la carpeta",
"输出logs/实验名目录下应有23456开头的文件和文件夹": "Debe haber archivos y carpetas que comiencen con 23456 en el directorio logs/nombre del experimento",
"输出信息": "Información de salida",
"输出文件夹路径": "Ruta de la carpeta de salida",
"输出的语音": "Audio de salida",
"选择训练完存放在SoVITS_weights和GPT_weights下的模型。默认的一个是底模,体验5秒Zero Shot TTS用。": "Seleccione el modelo almacenado en SoVITS_weights y GPT_weights después del entrenamiento. Uno de ellos es el modelo base, útil para experimentar con TTS de 5 segundos sin entrenamiento.",
"降噪结果输出文件夹": "Carpeta de salida de los resultados de reducción de ruido",
"降噪音频文件输入文件夹": "Carpeta de entrada de archivos de audio para reducción de ruido",
"需要合成的切分前文本": "Texto a sintetizar antes de la división",
"需要合成的文本": "Texto a sintetizar",
"需要合成的语种": "Idioma para la síntesis",
"音频自动切分输入路径,可文件可文件夹": "Ruta de entrada para la división automática de audio, puede ser un archivo o una carpeta",
"预训练的GPT模型路径": "Ruta del modelo GPT preentrenado",
"预训练的SSL模型路径": "Ruta del modelo SSL preentrenado",
"预训练的SoVITS-D模型路径": "Ruta del modelo SoVITS-D preentrenado",
"预训练的SoVITS-G模型路径": "Ruta del modelo SoVITS-G preentrenado",
"预训练的中文BERT模型路径": "Ruta del modelo BERT en chino preentrenado"
}
tools/i18n/locale/fr_FR.json
ADDED
@@ -0,0 +1,172 @@
{
"(1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;": "(1) MDX-Net (onnx_dereverb) : C'est le meilleur choix pour la réverbération à deux canaux, mais il ne peut pas éliminer la réverbération à un seul canal;",
"(234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。": "(234)DeEcho : Supprime les effets de délai. Aggressive est plus exhaustif que Normal dans la suppression, DeReverb élimine également la réverbération, peut supprimer la réverbération monocanal, mais n'élimine pas complètement la réverbération de plaque à haute fréquence.",
"*GPT模型列表": "*Liste des modèles GPT",
"*SoVITS模型列表": "*Liste des modèles SoVITS",
"*实验/模型名": "*Nom de l'expérience/modèle",
"*文本标注文件": "*Fichier d'annotation de texte",
"*训练集音频文件目录": "*Répertoire des fichiers audio d'entraînement",
"*请上传并填写参考信息": "*Veuillez télécharger et remplir les informations de référence",
"*请填写需要合成的目标文本和语种模式": "*Veuillez saisir le texte cible à synthétiser et le mode de langue.",
".list标注文件的路径": "Chemin du fichier d'annotation .list",
"0-前置数据集获取工具": "0-Outil de récupération de jeu de données préalable",
"0a-UVR5人声伴奏分离&去混响去延迟工具": "0a-Outil de séparation de la voix humaine et de l'accompagnement UVR5 & suppression de la réverbération et du retard",
"0b-语音切分工具": "0b-Outil de découpage vocal",
"0bb-语音降噪工具": "0bb-Outil de réduction du bruit vocal",
"0c-中文批量离线ASR工具": "0c-Outil chinois de transcription automatique hors ligne en masse",
"0d-语音文本校对标注工具": "0d-Outil de correction et d'annotation de texte vocal",
"1-GPT-SoVITS-TTS": "1-GPT-SoVITS-TTS",
"1A-训练集格式化工具": "1A-Outil de formatage du jeu de données d'entraînement",
"1Aa-文本内容": "1Aa-Contenu du texte",
"1Aabc-训练集格式化一键三连": "1Aabc-Formatage en un clic du jeu de données d'entraînement",
"1Ab-SSL自监督特征提取": "1Ab-Extraction de caractéristiques auto-supervisée SSL",
"1Ac-语义token提取": "1Ac-Extraction de jetons sémantiques",
"1B-微调训练": "1B-Entraînement fin",
"1Ba-SoVITS训练。用于分享的模型文件输出在SoVITS_weights下。": "1Ba-Entraînement SoVITS. Les fichiers de modèle destinés au partage sont enregistrés sous SoVITS_weights.",
"1Bb-GPT训练。用于分享的模型文件输出在GPT_weights下。": "1Bb-Entraînement GPT. Les fichiers de modèle destinés au partage sont enregistrés sous GPT_weights.",
"1C-推理": "1C-Inférence",
"1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;": "1. Le temps de traitement du modèle DeEcho-DeReverb est presque le double de celui des deux autres modèles DeEcho;",
"1、保留人声:不带和声的音频选这个,对主人声保留比HP5更好。内置HP2和HP3两个模型,HP3可能轻微漏伴奏但对主人声保留比HP2稍微好一丁点;": "1. Préserver les voix : Choisissez cette option pour les audio sans harmonie, car elle conserve mieux la voix principale par rapport au modèle HP5. Deux modèles intégrés, HP2 et HP3, sont disponibles. HP3 peut légèrement laisser passer l'accompagnement mais conserve la voix principale un peu mieux que HP2;",
"2-GPT-SoVITS-变声": "2-GPT-SoVITS-Modification de la voix",
"2、MDX-Net-Dereverb模型挺慢的;": "2. Le modèle MDX-Net-Dereverb est assez lent;",
"2、仅保留主人声:带和声的音频选这个,对主人声可能有削弱。内置HP5一个模型;": "2. Conserver uniquement la voix principale : Choisissez cette option pour les audio avec harmonie, car elle peut affaiblir la voix principale. Un modèle HP5 intégré est disponible;",
"3、个人推荐的最干净的配置是先MDX-Net再DeEcho-Aggressive。": "3. La configuration la plus propre que je recommande est d'utiliser d'abord MDX-Net, puis DeEcho-Aggressive.",
"3、去混响、去延迟模型(by FoxJoy):": "3. Modèle de suppression de réverbération et de retard (par FoxJoy) :",
"ASR 模型": "Modèle ASR",
"ASR 模型尺寸": "Taille du modèle ASR",
"数据类型精度": "précision du type de données",
"ASR 语言设置": "Paramètres de langue ASR",
"ASR进程输出信息": "Informations de processus ASR",
"GPT模型列表": "Liste des modèles GPT",
"GPT训练进程输出信息": "Informations de processus d'entraînement GPT",
"GPU卡号,只能填1个整数": "Numéro de carte GPU, ne peut contenir qu'un seul entier",
"GPU卡号以-分割,每个卡号一个进程": "Numéro de carte GPU séparé par des tirets, un processus par numéro de carte",
"SSL进程输出信息": "Informations de processus SSL",
"SoVITS模型列表": "Liste des modèles SoVITS",
"SoVITS训练进程输出信息": "Informations de processus d'entraînement SoVITS",
"TTS推理WebUI进程输出信息": "Informations de processus de l'interface Web d'inférence TTS",
"TTS推理进程已关闭": "Le processus d'inférence TTS est terminé",
"TTS推理进程已开启": "Le processus d'inférence TTS est en cours",
"UVR5已关闭": "UVR5 est désactivé",
"UVR5已开启": "UVR5 est activé",
"UVR5进程输出信息": "Informations de processus UVR5",
"alpha_mix:混多少比例归一化后音频进来": "alpha_mix: proportion d'audio normalisé mélangé",
"gpt采样参数(无参考文本时不要太低。不懂就用默认):": "Paramètres d'échantillonnage de GPT (ne pas mettre trop bas lorsqu'il n'y a pas de texte de référence. Utilisez les valeurs par défaut si vous n'êtes pas sûr):",
"hop_size:怎么算音量曲线,越小精度越大计算量越高(不是精度越大效果越好)": "hop_size: comment calculer la courbe de volume, plus petit pour une précision plus élevée mais une charge de calcul plus élevée (ce n'est pas une meilleure précision)",
"max:归一化后最大值多少": "max: valeur maximale après normalisation",
"max_sil_kept:切完后静音最多留多长": "max_sil_kept: durée maximale de silence après la coupe",
"min_interval:最短切割间隔": "min_interval: intervalle de coupe minimum",
"min_length:每段最小多长,如果第一段太短一直和后面段连起来直到超过这个值": "min_length:longueur minimale de chaque segment ; si le premier segment est trop court, il est concaténé avec les segments suivants jusqu'à ce que la longueur dépasse cette valeur",
"temperature": "température",
"threshold:音量小于这个值视作静音的备选切割点": "seuil: le volume inférieur à cette valeur est considéré comme un point de coupe silencieux alternatif",
"top_k": "top_k",
"top_p": "top_p",
"一键三连进程输出信息": "Informations de processus de l'un clic trois connexions",
"不切": "Pas de découpe",
"中文": "Chinois",
"中文教程文档:https://www.yuque.com/baicaigongchang1145haoyuangong/ib3g1e": "Documentation du tutoriel en chinois:https://www.yuque.com/baicaigongchang1145haoyuangong/ib3g1e",
"中英混合": "Mélange de chinois et d'anglais",
"也可批量输入音频文件, 二选一, 优先读文件夹": "Également possible d'entrer en lot des fichiers audio, au choix, privilégiez la lecture du dossier",
"人声伴奏分离批量处理, 使用UVR5模型。": "Traitement par lot de séparation voix-accompagnement en utilisant le modèle UVR5.",
"人声提取激进程度": "Degré d'extraction des voix",
"伴奏人声分离&去混响&去回声": "Séparation de la voix et de l'accompagnement, suppression de la réverbération et de l'écho",
"使用无参考文本模式时建议使用微调的GPT,听不清参考音频说的啥(不晓得写啥)可以开,开启后无视填写的参考文本。": "Il est recommandé d'utiliser GPT finement ajusté en mode sans texte de référence. Si vous ne comprenez pas ce que dit l'audio de référence (vous ne savez pas quoi écrire), vous pouvez l'activer ; une fois activé, ignorez le texte de référence saisi.",
"保存频率save_every_epoch": "Fréquence de sauvegarde (sauvegarder à chaque époque)",
"凑50字一切": "Assembler 50 mots tout",
"凑四句一切": "Composez quatre phrases pour tout remplir",
"切分后文本": "Texte après découpage",
"切分后的子音频的输出根目录": "Répertoire racine de sortie des sous-audios après découpage",
"切割使用的进程数": "Nombre de processus utilisés pour le découpage",
"刷新模型路径": "Actualiser le chemin du modèle",
"前端处理后的文本(每句):": "Texte après traitement frontal (par phrase):",
"去混响/去延迟,附:": "Suppression de la réverbération / suppression du retard, ci-joint:",
"参考音频在3~10秒范围外,请更换!": "Veuillez remplacer l'audio de référence si sa durée est en dehors de la plage de 3 à 10 secondes!",
"参考音频的文本": "Texte de l'audio de référence",
"参考音频的语种": "Langue de l'audio de référence",
"合成语音": "Synthèse vocale",
"合格的文件夹路径格式举例: E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华测试样例(去文件管理器地址栏拷就行了)。": "Exemple de format de chemin de dossier valide : E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华测试样例 (copiez-le depuis la barre d'adresse de l'explorateur de fichiers).",
"后续将支持转音素、手工修改音素、语音合成分步执行。": "Des fonctionnalités futures incluront la conversion en phonèmes, la modification manuelle des phonèmes et l'exécution par étapes de la synthèse vocale.",
"填切割后音频所在目录!读取的音频文件完整路径=该目录-拼接-list文件里波形对应的文件名(不是全路径)。如果留空则使用.list文件里的绝对全路径。": "Veuillez indiquer le répertoire contenant les audio découpés ! Le chemin complet du fichier audio à lire = ce répertoire - nom du fichier correspondant à l'onde dans le fichier .list (pas le chemin complet). Si laissé vide, le chemin absolu dans le fichier .list sera utilisé.",
"多语种混合": "Mélange multilingue",
"实际输入的参考文本:": "Texte de référence réellement saisi:",
"实际输入的目标文本(切句后):": "Texte cible réellement saisi (après découpage):",
"实际输入的目标文本(每句):": "Texte cible réellement saisi (par phrase):",
"实际输入的目标文本:": "Texte cible réellement saisi:",
"导出文件格式": "Format d'exportation du fichier",
"开启GPT训练": "Activer l'entraînement GPT",
"开启SSL提取": "Activer l'extraction SSL",
"开启SoVITS训练": "Activer l'entraînement SoVITS",
"开启一键三连": "Activer l'un clic trois connexions",
"开启文本获取": "Activer l'extraction de texte",
"开启无参考文本模式。不填参考文本亦相当于开启。": "Activer le mode sans texte de référence. Laisser le texte de référence vide équivaut également à activer le mode.",
"开启离线批量ASR": "Activer la transcription automatique hors ligne en masse",
"开启语义token提取": "Activer l'extraction de jetons sémantiques",
"开启语音切割": "Activer le découpage vocal",
"开启语音降噪": "Activer la réduction de bruit vocal",
"怎么切": "Comment découper",
"总训练轮数total_epoch": "Nombre total d'époques d'entraînement",
"总训练轮数total_epoch,不建议太高": "Nombre total d'époques d'entraînement, pas recommandé d'être trop élevé",
"打标工具WebUI已关闭": "L'interface Web de l'outil d'annotation est terminée",
"打标工具WebUI已开启": "L'interface Web de l'outil d'annotation est en cours",
"打标工具进程输出信息": "Informations de processus de l'outil d'annotation",
"指定输出主人声文件夹": "Spécifier le dossier de sortie pour la voix principale",
"指定输出非主人声文件夹": "Spécifier le dossier de sortie pour la non-voix principale",
"按中文句号。切": "Couper selon les points en chinois.",
"按标点符号切": "Couper selon les signes de ponctuation",
"按英文句号.切": "Découpez par des points en anglais",
"文本切分工具。太长的文本合成出来效果不一定好,所以太长建议先切。合成会根据文本的换行分开合成再拼起来。": "Outil de découpage de texte. Un texte trop long peut ne pas donner un bon résultat, donc il est recommandé de le couper d'abord s'il est trop long. La synthèse se fera en séparant le texte par les sauts de ligne puis en les assemblant.",
"文本模块学习率权重": "Poids du taux d'apprentissage du module de texte",
"文本进程输出信息": "Informations de processus de texte",
"施工中,请静候佳音": "En construction, veuillez attendre patiemment",
"日文": "Japonais",
"日英混合": "Mélange Japonais-Anglais",
"是否仅保存最新的ckpt文件以节省硬盘空间": "Sauvegarder uniquement le dernier fichier ckpt pour économiser de l'espace disque",
"是否在每次保存时间点将最终小模型保存至weights文件夹": "Sauvegarder le petit modèle final dans le dossier weights à chaque point de sauvegarde",
"是否开启TTS推理WebUI": "Activer l'interface Web d'inférence TTS",
"是否开启UVR5-WebUI": "Activer UVR5-WebUI",
"是否开启dpo训练选项(实验性)": "Activer l'option d'entraînement DPO (expérimental)",
"是否开启打标WebUI": "Activer l'interface Web d'annotation",
"是否直接对上次合成结果调整语速。防止随机性。": "Est-ce qu'on ajuste directement la vitesse de parole du dernier résultat de synthèse pour éviter l'aléatoire ?",
"显卡信息": "Informations sur la carte graphique",
"本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. <br>如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录<b>LICENSE</b>.": "Ce logiciel est open source sous la licence MIT. L'auteur n'a aucun contrôle sur le logiciel. Les utilisateurs et les diffuseurs du son exporté par le logiciel en assument l'entière responsabilité. <br>Si vous n'acceptez pas ces termes, vous ne pouvez ni utiliser ni citer aucun code ou fichier à l'intérieur du package. Voir <b>LICENSE</b> dans le répertoire racine pour plus de détails.",
"模型": "Modèle",
"模型分为三类:": "Les modèles sont classés en trois catégories:",
"模型切换": "Changement de modèle",
"每张显卡的batch_size": "Taille de lot par carte graphique",
"终止ASR进程": "Arrêter le processus ASR",
"终止GPT训练": "Arrêter l'entraînement GPT",
"终止SSL提取进程": "Arrêter le processus d'extraction SSL",
"终止SoVITS训练": "Arrêter l'entraînement SoVITS",
"终止一键三连": "Arrêter l'un clic trois connexions",
"终止文本获取进程": "Arrêter le processus d'extraction de texte",
"终止语义token提取进程": "Arrêter le processus d'extraction de jetons sémantiques",
"终止语音切割": "Arrêter le découpage vocal",
"终止语音降噪进程": "Arrêter le processus de réduction du bruit vocal",
"英文": "Anglais",
"语义token提取进程输出信息": "Informations de processus d'extraction de jetons sémantiques",
"语速": "Débit de parole",
"语速调整,高为更快": "Ajuster la vitesse de parole, plus élevée pour plus rapide",
"语音切割进程输出信息": "Informations de processus de découpage vocal",
"语音降噪进程输出信息": "Informations de sortie du processus de réduction du bruit vocal",
"请上传3~10秒内参考音频,超过会报错!": "Veuillez télécharger une référence audio de 3 à 10 secondes ; les fichiers plus longs généreront une erreur!",
"请输入有效文本": "Veuillez entrer un texte valide",
"转换": "Conversion",
"输入待处理音频文件夹路径": "Entrez le chemin du dossier audio à traiter",
"输入文件夹路径": "Chemin du dossier à entrer",
"输出logs/实验名目录下应有23456开头的文件和文件夹": "Les fichiers et dossiers commençant par 23456 devraient être présents dans le répertoire logs/nom de l'expérience",
"输出信息": "Sortie d'information",
"输出文件夹路径": "Chemin du dossier de sortie",
"输出的语音": "Audio de sortie",
"选择训练完存放在SoVITS_weights和GPT_weights下的模型。默认的一个是底模,体验5秒Zero Shot TTS用。": "Choisissez le modèle entraîné stocké sous SoVITS_weights et GPT_weights. Par défaut, l'un d'eux est un modèle de base pour l'expérience de TTS Zero Shot de 5 secondes.",
"降噪结果输出文件夹": "Dossier de sortie des résultats de réduction du bruit",
"降噪音频文件输入文件夹": "Dossier d'entrée des fichiers audio de réduction du bruit",
"需要合成的切分前文本": "Texte préalable à la synthèse",
"需要合成的文本": "Texte à synthétiser",
"需要合成的语种": "Langue de synthèse requise",
"音频自动切分输入路径,可文件可文件夹": "Chemin d'entrée automatique de découpage audio, peut être un fichier ou un dossier",
"预训练的GPT模型路径": "Chemin du modèle GPT pré-entraîné",
"预训练的SSL模型路径": "Chemin du modèle SSL pré-entraîné",
"预训练的SoVITS-D模型路径": "Chemin du modèle SoVITS-D pré-entraîné",
"预训练的SoVITS-G模型路径": "Chemin du modèle SoVITS-G pré-entraîné",
"预训练的中文BERT模型路径": "Chemin du modèle BERT chinois pré-entraîné"
}
tools/i18n/locale/it_IT.json
ADDED
@@ -0,0 +1,172 @@
{
"(1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;": "(1)MDX-Net (onnx_dereverb): È la scelta migliore per la riverberazione a due canali, ma non può rimuovere la riverberazione a canale singolo;",
"(234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。": "(234)DeEcho: Rimuove gli effetti di ritardo. Aggressive è più completo di Normal nella rimozione, DeReverb rimuove ulteriormente la riverberazione, può rimuovere la riverberazione a canale singolo, ma non rimuove completamente la riverberazione a piastra ad alta frequenza.",
"*GPT模型列表": "*Lista dei modelli GPT",
"*SoVITS模型列表": "*Lista dei modelli SoVITS",
"*实验/模型名": "*Nome dell'esperimento/modello",
"*文本标注文件": "*File di annotazione del testo",
"*训练集音频文件目录": "*Directory dei file audio del set di addestramento",
"*请上传并填写参考信息": "*Carica e compila le informazioni di riferimento",
"*请填写需要合成的目标文本和语种模式": "*Si prega di inserire il testo di destinazione da sintetizzare e la modalità lingua",
".list标注文件的路径": "Percorso del file di annotazione .list",
"0-前置数据集获取工具": "0-Strumento di acquisizione del dataset preliminare",
"0a-UVR5人声伴奏分离&去混响去延迟工具": "0a-Strumento di separazione voce e accompagnamento UVR5 & Rimozione riverbero e ritardo",
"0b-语音切分工具": "0b-Strumento di segmentazione vocale",
"0bb-语音降噪工具": "0bb-Strumento di riduzione del rumore vocale",
"0c-中文批量离线ASR工具": "0c-Strumento di ASR offline batch in cinese",
"0d-语音文本校对标注工具": "0d-Strumento di correzione e annotazione testo vocale",
"1-GPT-SoVITS-TTS": "1-GPT-SoVITS-TTS",
"1A-训练集格式化工具": "1A-Strumento di formattazione del set di addestramento",
"1Aa-文本内容": "1Aa-Contenuto del testo",
"1Aabc-训练集格式化一键三连": "1Aabc-Strumento di formattazione del set di addestramento con tre passaggi",
"1Ab-SSL自监督特征提取": "1Ab-Estrazione di caratteristiche auto-supervisionata SSL",
"1Ac-语义token提取": "1Ac-Estrazione del token semantico",
"1B-微调训练": "1B-Allenamento di affinamento",
"1Ba-SoVITS训练。用于分享的模型文件输出在SoVITS_weights下。": "1Ba-Allenamento di SoVITS. I file del modello destinati alla condivisione sono salvati in SoVITS_weights.",
"1Bb-GPT训练。用于分享的模型文件输出在GPT_weights下。": "1Bb-Allenamento di GPT. I file del modello destinati alla condivisione sono salvati in GPT_weights.",
"1C-推理": "1C-Inferenza",
"1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;": "1. Il tempo di elaborazione del modello DeEcho-DeReverb è quasi il doppio di quello degli altri due modelli DeEcho;",
"1、保留人声:不带和声的音频选这个,对主人声保留比HP5更好。内置HP2和HP3两个模型,HP3可能轻微漏伴奏但对主人声保留比HP2稍微好一丁点;": "1. Conserva la voce principale: scegli questa opzione per audio senza armonie, poiché conserva meglio la voce principale rispetto al modello HP5. Include due modelli integrati, HP2 e HP3. HP3 potrebbe far passare leggermente l'accompagnamento ma conserva meglio la voce principale rispetto a HP2;",
"2-GPT-SoVITS-变声": "2-GPT-SoVITS-Voce modificata",
"2、MDX-Net-Dereverb模型挺慢的;": "2. Il modello MDX-Net-Dereverb è piuttosto lento;",
"2、仅保留主人声:带和声的音频选这个,对主人声可能有削弱。内置HP5一个模型;": "2. Solo conserva la voce principale: scegli questa opzione per audio con armonie, poiché potrebbe indebolire la voce principale. Include un modello HP5;",
"3、个人推荐的最干净的配置是先MDX-Net再DeEcho-Aggressive。": "3. La configurazione più pulita consigliata è MDX-Net seguito da DeEcho-Aggressive.",
"3、去混响、去延迟模型(by FoxJoy):": "3. Modello per rimuovere la riverberazione e il ritardo (by FoxJoy):",
"ASR 模型": "Modello ASR",
"ASR 模型尺寸": "Dimensioni del modello ASR",
"数据类型精度": "precisione del tipo di dati",
"ASR 语言设置": "Impostazioni linguistiche ASR",
"ASR进程输出信息": "Informazioni sull'output del processo ASR",
"GPT模型列表": "Elenco dei modelli GPT",
"GPT训练进程输出信息": "Informazioni sull'output del processo di allenamento di GPT",
"GPU卡号,只能填1个整数": "Numero della scheda grafica, può essere inserito solo un numero intero",
"GPU卡号以-分割,每个卡号一个进程": "Numero di GPU separati da '-'; ogni numero corrisponde a un processo",
"SSL进程输出信息": "Informazioni sull'output del processo SSL",
"SoVITS模型列表": "Elenco dei modelli SoVITS",
"SoVITS训练进程输出信息": "Informazioni sull'output del processo di allenamento di SoVITS",
"TTS推理WebUI进程输出信息": "Informazioni sull'output del processo dell'interfaccia utente Web per l'inferenza TTS",
"TTS推理进程已关闭": "Il processo di inferenza TTS è stato chiuso",
"TTS推理进程已开启": "Il processo di inferenza TTS è stato avviato",
"UVR5已关闭": "UVR5 è disattivato",
"UVR5已开启": "UVR5 è attivato",
"UVR5进程输出信息": "Informazioni sull'output del processo UVR5",
"alpha_mix:混多少比例归一化后音频进来": "alpha_mix: Quanta proporzione dell'audio normalizzato deve essere miscelata",
"gpt采样参数(无参考文本时不要太低。不懂就用默认):": "Parametri di campionamento di GPT (non troppo bassi quando non c'è testo di riferimento. Utilizzare i valori predefiniti in caso di incertezza):",
"hop_size:怎么算音量曲线,越小精度越大计算量越高(不是精度越大效果越好)": "hop_size: Come calcolare la curva del volume. Più piccolo è, maggiore è la precisione ma aumenta la complessità computazionale (non significa che una maggiore precisione dà risultati migliori)",
"max:归一化后最大值多少": "max: Massimo valore dopo la normalizzazione",
"max_sil_kept:切完后静音最多留多长": "max_sil_kept: Massima durata del silenzio dopo il taglio",
"min_interval:最短切割间隔": "min_interval: Intervallo minimo di taglio",
"min_length:每段最小多长,如果第一段太短一直和后面段连起来直到超过这个值": "min_length: Lunghezza minima per segmento; se il primo segmento è troppo corto, sarà unito ai segmenti successivi fino a superare questo valore",
"temperature": "temperatura",
"threshold:音量小于这个值视作静音的备选切割点": "threshold: Punto di taglio alternativo considerato silenzioso se il volume è inferiore a questo valore",
"top_k": "top_k",
"top_p": "top_p",
"一键三连进程输出信息": "Informazioni sull'output del processo di 'One Click Three Connect'",
"不切": "Nessuna suddivisione",
"中文": "Cinese",
"中文教程文档:https://www.yuque.com/baicaigongchang1145haoyuangong/ib3g1e": "Documentazione del tutorial in cinese:https://www.yuque.com/baicaigongchang1145haoyuangong/ib3g1e",
"中英混合": "Cinese e inglese misti",
"也可批量输入音频文件, 二选一, 优先读文件夹": "È possibile anche inserire file audio in batch, una delle due opzioni, con priorità alla lettura della cartella",
"人声伴奏分离批量处理, 使用UVR5模型。": "Separazione voce-accompagnamento in batch, utilizza il modello UVR5.",
"人声提取激进程度": "Grado di aggressività dell'estrazione vocale",
"伴奏人声分离&去混响&去回声": "Separazione tra accompagnamento e voce & Rimozione del riverbero & Rimozione dell'eco",
"使用无参考文本模式时建议使用微调的GPT,听不清参考音频说的啥(不晓得写啥)可以开,开启后无视填写的参考文本。": "Si consiglia di utilizzare GPT fine-tuned quando si utilizza la modalità senza testo di riferimento. Se non si riesce a capire cosa dice l'audio di riferimento (e non si sa cosa scrivere), è possibile abilitare questa opzione, ignorando il testo di riferimento inserito.",
"保存频率save_every_epoch": "Frequenza di salvataggio ogni epoca",
"凑50字一切": "Riempire con 50 caratteri per tutto",
"凑四句一切": "Riempire con quattro frasi per tutto",
"切分后文本": "Testo dopo il taglio",
"切分后的子音频的输出根目录": "Directory radice di output per gli audio segmentati",
"切割使用的进程数": "Numero di processi utilizzati per il taglio",
"刷新模型路径": "Aggiorna il percorso del modello",
"前端处理后的文本(每句):": "Testo elaborato dal front-end (per frase):",
"去混响/去延迟,附:": "Rimozione della riverberazione/ritardo, allegato:",
"参考音频在3~10秒范围外,请更换!": "L'audio di riferimento è al di fuori dell'intervallo di 3-10 secondi. Si prega di cambiarlo!",
"参考音频的文本": "Testo dell'audio di riferimento",
"参考音频的语种": "Lingua dell'audio di riferimento",
"合成语音": "Sintesi vocale",
"合格的文件夹路径格式举例: E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华测试样例(去文件管理器地址栏拷就行了)。": "Formato di percorso della cartella valido: E:\\codes\\py39\\vits_vc_gpu\\Esempio di test di BaiLuShuangHua (copiare direttamente dalla barra degli indirizzi del gestore file).",
"后续将支持转音素、手工修改音素、语音合成分步执行。": "In futuro, sarà supportata la conversione in fonemi, la modifica manuale dei fonemi e l'esecuzione passo-passo della sintesi vocale.",
"填切割后音频所在目录!读取的音频文件完整路径=该目录-拼接-list文件里波形对应的文件名(不是全路径)。如果留空则使用.list文件里的绝对全路径。": "Inserisci la directory dell'audio segmentato! Il percorso completo del file audio letto = questa directory - unione del nome del file corrispondente alle forme d'onda nel file .list (non il percorso completo). Se lasciato vuoto, verrà utilizzato il percorso assoluto nel file .list.",
"多语种混合": "Mix multilingue",
"实际输入的参考文本:": "Testo di riferimento effettivamente inserito:",
"实际输入的目标文本(切句后):": "Testo di destinazione effettivamente inserito (dopo il taglio delle frasi):",
"实际输入的目标文本(每句):": "Testo di destinazione effettivamente inserito (per frase):",
"实际输入的目标文本:": "Testo di destinazione effettivamente inserito:",
"导出文件格式": "Formato di esportazione del file",
"开启GPT训练": "Attivare l'allenamento di GPT",
"开启SSL提取": "Attivare l'estrazione SSL",
"开启SoVITS训练": "Attivare l'allenamento di SoVITS",
"开启一键三连": "Attivare la formattazione con tre passaggi",
"开启文本获取": "Attivare l'estrazione del testo",
"开启无参考文本模式。不填参考文本亦相当于开启。": "Attivare la modalità senza testo di riferimento. Anche se non inserisci un testo di riferimento, la modalità verrà attivata.",
"开启离线批量ASR": "Attivare ASR offline batch",
"开启语义token提取": "Attivare l'estrazione del token semantico",
"开启语音切割": "Attivare la segmentazione vocale",
"开启语音降噪": "Attivare la riduzione del rumore vocale",
"怎么切": "Come tagliare",
"总训练轮数total_epoch": "Numero totale di epoche di addestramento",
"总训练轮数total_epoch,不建议太高": "Numero totale di epoche di addestramento, non raccomandato troppo alto",
"打标工具WebUI已关闭": "L'interfaccia utente Web dello strumento di annotazione è stata chiusa",
"打标工具WebUI已开启": "L'interfaccia utente Web dello strumento di annotazione è stata avviata",
"打标工具进程输出信息": "Informazioni sull'output del processo di annotazione",
"指定输出主人声文件夹": "Specifica la cartella di output per la voce principale",
"指定输出非主人声文件夹": "Specifica la cartella di output per la non voce principale",
"按中文句号。切": "Taglia secondo il punto cinese.",
"按标点符号切": "Taglia secondo i segni di punteggiatura",
"按英文句号.切": "Taglia secondo il punto inglese",
"文本切分工具。太长的文本合成出来效果不一定好,所以太长建议先切。合成会根据文本的换行分开合成再拼起来。": "Strumento di divisione del testo. I testi troppo lunghi potrebbero non avere un buon effetto di sintesi, quindi è consigliabile dividerli prima della sintesi. La sintesi verrà separata in base ai ritorni a capo nel testo e successivamente ricomposta.",
"文本模块学习率权重": "Peso del tasso di apprendimento del modulo di testo",
"文本进程输出信息": "Informazioni sull'output del processo di estrazione del testo",
"施工中,请静候佳音": "In costruzione, attendi pazientemente le buone notizie",
"日文": "Giapponese",
"日英混合": "Mix giapponese e inglese",
"是否仅保存最新的ckpt文件以节省硬盘空间": "Salvare solo il file ckpt più recente per risparmiare spazio su disco",
"是否在每次保存时间点将最终小模型保存至weights文件夹": "Salvare il modello finale più piccolo nella cartella weights ad ogni punto di salvataggio",
"是否开启TTS推理WebUI": "Attivare l'interfaccia utente Web per l'inferenza TTS",
"是否开启UVR5-WebUI": "Attivare UVR5-WebUI",
"是否开启dpo训练选项(实验性)": "Attivare l'opzione di addestramento DPO (sperimentale)",
"是否开启打标WebUI": "Attivare l'interfaccia utente Web di annotazione",
"是否直接对上次合成结果调整语速。防止随机性。": "Se regolare direttamente la velocità della voce dell'ultimo risultato di sintesi per evitare casualità.",
"显卡信息": "Informazioni sulla scheda grafica",
"本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. <br>如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录<b>LICENSE</b>.": "Questo software è open source con licenza MIT. L'autore non ha alcun controllo sul software. L'utente che utilizza il software o diffonde i suoni derivati dal software ne è responsabile. <br>Se non accetti questi termini, non puoi utilizzare o citare alcun codice o file all'interno del pacchetto software. Vedi la cartella principale<b>LICENSE</b> per i dettagli.",
"模型": "Modello",
"模型分为三类:": "I modelli sono divisi in tre categorie:",
"模型切换": "Cambio del modello",
"每张显卡的batch_size": "Batch size per ogni scheda grafica",
"终止ASR进程": "Terminare il processo ASR",
"终止GPT训练": "Terminare l'allenamento di GPT",
"终止SSL提取进程": "Terminare il processo di estrazione SSL",
"终止SoVITS训练": "Terminare l'allenamento di SoVITS",
"终止一键三连": "Terminare la formattazione con tre passaggi",
"终止文本获取进程": "Terminare il processo di estrazione del testo",
"终止语义token提取进程": "Terminare il processo di estrazione del token semantico",
"终止语音切割": "Terminare la segmentazione vocale",
"终止语音降噪进程": "Termina il processo di riduzione del rumore vocale",
"英文": "Inglese",
"语义token提取进程输出信息": "Informazioni sull'output del processo di estrazione del token semantico",
"语速": "Velocità della voce",
"语速调整,高为更快": "Regolare la velocità della voce, più alta per più veloce",
"语音切割进程输出信息": "Informazioni sull'output del processo di segmentazione vocale",
"语音降噪进程输出信息": "Informazioni sull'output del processo di riduzione del rumore vocale",
"请上传3~10秒内参考音频,超过会报错!": "Carica un audio di riferimento della durata compresa tra 3 e 10 secondi. Superiore a questo, verrà generato un errore!",
"请输入有效文本": "Inserisci un testo valido",
"转换": "Converti",
"输入待处理音频文件夹路径": "Inserisci il percorso della cartella dei file audio da elaborare",
"输入文件夹路径": "Inserisci il percorso della cartella",
"输出logs/实验名目录下应有23456开头的文件和文件夹": "Nella cartella logs/nome dell'esperimento dovrebbero esserci file e cartelle che iniziano con 23456",
"输出信息": "Informazioni di output",
"输出文件夹路径": "Percorso della cartella di output",
"输出的语音": "Audio di output",
"选择训练完存放在SoVITS_weights和GPT_weights下的模型。默认的一个是底模,体验5秒Zero Shot TTS用。": "Scegli il modello salvato in SoVITS_weights e GPT_weights dopo l'addestramento. Uno di default è il modello di base, utilizzato per l'esperienza di Zero Shot TTS in 5 secondi.",
"降噪结果输出文件夹": "Cartella di output dei risultati di riduzione del rumore",
"降噪音频文件输入文件夹": "Cartella di input dei file audio per la riduzione del rumore",
"需要合成的切分前文本": "Testo da sintetizzare prima del taglio",
"需要合成的文本": "Testo da sintetizzare",
"需要合成的语种": "Lingua da sintetizzare",
"音频自动切分输入路径,可文件可文件夹": "Percorso di input per la segmentazione automatica dell'audio, può essere un file o una cartella",
"预训练的GPT模型路径": "Percorso del modello preaddestrato GPT",
"预训练的SSL模型路径": "Percorso del modello SSL preaddestrato",
"预训练的SoVITS-D模型路径": "Percorso del modello preaddestrato SoVITS-D",
"预训练的SoVITS-G模型路径": "Percorso del modello preaddestrato SoVITS-G",
"预训练的中文BERT模型路径": "Percorso del modello BERT cinese preaddestrato"
}
tools/i18n/locale/ja_JP.json
ADDED
@@ -0,0 +1,172 @@
{
"(1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;": "(1)MDX-Net(onnx_dereverb):二重チャンネルのリバーブに最適な選択ですが、単一チャンネルのリバーブは除去できません;",
"(234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。": "(234)DeEcho:遅延効果を除去します。AggressiveはNormalよりも徹底的に除去し、DeReverbは追加でリバーブを除去し、モノラルリバーブを除去できますが、高周波数のプレートリバーブは完全には除去できません。",
"*GPT模型列表": "*GPTモデルリスト",
"*SoVITS模型列表": "*SoVITSモデルリスト",
"*实验/模型名": "*実験/モデル名",
"*文本标注文件": "*テキスト注釈ファイル",
"*训练集音频文件目录": "*トレーニングデータのオーディオファイルディレクトリ",
"*请上传并填写参考信息": "*参照情報をアップロードして記入してください",
"*请填写需要合成的目标文本和语种模式": "*合成対象テキストと言語モードを入力してください",
".list标注文件的路径": ".listアノテーションファイルのパス",
"0-前置数据集获取工具": "0-データセット取得ツールの事前処理",
"0a-UVR5人声伴奏分离&去混响去延迟工具": "0a-UVR5ボーカルアカンパニメント分離&リバーブおよびディレイ除去ツール",
"0b-语音切分工具": "0b-音声分割ツール",
"0bb-语音降噪工具": "0bb-音声ノイズ除去ツール",
"0c-中文批量离线ASR工具": "0c-中国語バッチオフラインASRツール",
"0d-语音文本校对标注工具": "0d-音声テキストの校正アノテーションツール",
"1-GPT-SoVITS-TTS": "1-GPT-SoVITS-TTS",
"1A-训练集格式化工具": "1A-トレーニングデータのフォーマットツール",
"1Aa-文本内容": "1Aa-テキストの内容",
"1Aabc-训练集格式化一键三连": "1Aabc-トレーニングデータのフォーマットワンクリック三連",
"1Ab-SSL自监督特征提取": "1Ab-SSLセルフスーパーバイズ特徴抽出",
"1Ac-语义token提取": "1Ac-セマンティックトークン抽出",
"1B-微调训练": "1B-ファインチューニングトレーニング",
"1Ba-SoVITS训练。用于分享的模型文件输出在SoVITS_weights下。": "1Ba-SoVITSトレーニング。共有用のモデルファイルはSoVITS_weightsディレクトリに出力されます。",
"1Bb-GPT训练。用于分享的模型文件输出在GPT_weights下。": "1Bb-GPTトレーニング。共有用のモデルファイルはGPT_weightsディレクトリに出力されます。",
"1C-推理": "1C-推論",
"1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;": "1、DeEcho-DeReverbモデルの処理時間は、他の2つのDeEchoモデルのほぼ2倍です;",
"1、保留人声:不带和声的音频选这个,对主人声保留比HP5更好。内置HP2和HP3两个模型,HP3可能轻微漏伴奏但对主人声保留比HP2稍微好一丁点;": "1、主音を保持: ハーモニーなしの音声にはこのオプションを選択し、HP5よりも主音の保持が優れています。HP2とHP3の2つのモデルが内蔵されており、HP3はわずかに伴奏を漏らす可能性がありますが、HP2よりも主音の保持がわずかに良いです;",
"2-GPT-SoVITS-变声": "2-GPT-SoVITS-ボイスチェンジャー",
"2、MDX-Net-Dereverb模型挺慢的;": "2、MDX-Net-Dereverbモデルはかなり遅いです;",
"2、仅保留主人声:带和声的音频选这个,对主人声可能有削弱。内置HP5一个模型;": "2、主音のみを保持: ハーモニー付きの音声にはこのオプションを選択し、主音が弱くなる可能性があります。HP5モデルが1つ内蔵されています;",
"3、个人推荐的最干净的配置是先MDX-Net再DeEcho-Aggressive。": "3、最もクリーンな設定は、MDX-Netの後にDeEcho-Aggressiveを使用することをお勧めします。",
"3、去混响、去延迟模型(by FoxJoy):": "3、リバーブ除去と遅延除去モデル(by FoxJoy):",
"ASR 模型": "ASR モデル",
"ASR 模型尺寸": "ASRモデルサイズ",
"数据类型精度": "データ型の精度",
"ASR 语言设置": "ASR 言語設定",
"ASR进程输出信息": "ASRプロセスの出力情報",
"GPT模型列表": "GPTモデルリスト",
"GPT训练进程输出信息": "GPTトレーニングプロセスの出力情報",
"GPU卡号,只能填1个整数": "GPU番号、1つの整数しか入力できません",
"GPU卡号以-分割,每个卡号一个进程": "GPUカード番号はハイフンで区切り、各カード番号ごとに1つのプロセスが実行されます",
"SSL进程输出信息": "SSLプロセスの出力情報",
"SoVITS模型列表": "SoVITSモデルリスト",
"SoVITS训练进程输出信息": "SoVITSトレーニングプロセスの出力情報",
"TTS推理WebUI进程输出信息": "TTS推論WebUIプロセスの出力情報",
"TTS推理进程已关闭": "TTS推論プロセスが終了しました",
"TTS推理进程已开启": "TTS推論プロセスが開始されました",
"UVR5已关闭": "UVR5がオフになっています",
"UVR5已开启": "UVR5がオンになっています",
"UVR5进程输出信息": "UVR5プロセスの出力情報",
"alpha_mix:混多少比例归一化后音频进来": "alpha_mix:正規化後のオーディオが入る割合",
"gpt采样参数(无参考文本时不要太低。不懂就用默认):": "GPT サンプリングパラメーター(参照テキストがない場合はあまり低くしないでください。わからない場合はデフォルトを使用してください):",
"hop_size:怎么算音量曲线,越小精度越大计算量越高(不是精度越大效果越好)": "hop_size: 音量曲線の計算方法、小さいほど精度が高くなりますが、計算量が増加します(精度が高いほど必ずしも効果が良いわけではありません)",
"max:归一化后最大值多少": "max:正規化後の最大値",
"max_sil_kept:切完后静音最多留多长": "max_sil_kept:切り終えた後、最大でどれだけ静かにするか",
"min_interval:最短切割间隔": "min_interval:最短カット間隔",
"min_length:每段最小多长,如果第一段太短一直和后面段连起来直到超过这个值": "min_length:各セグメントの最小長さ。最初のセグメントが短すぎる場合、連続して後続のセグメントに接続され、この値を超えるまで続きます。",
"temperature": "temperature",
"threshold:音量小于这个值视作静音的备选切割点": "閾値:この値未満の音量は静音と見なされ、代替のカットポイントとして扱われます",
"top_k": "top_k",
"top_p": "top_p",
"一键三连进程输出信息": "ワンクリック三連プロセスの出力情報",
"不切": "切らない",
"中文": "中国語",
"中文教程文档:https://www.yuque.com/baicaigongchang1145haoyuangong/ib3g1e": "中国語チュートリアルドキュメント:https://www.yuque.com/baicaigongchang1145haoyuangong/ib3g1e",
"中英混合": "中英混合",
"也可批量输入音频文件, 二选一, 优先读文件夹": "複数のオーディオファイルもインポートできます。フォルダパスが存在する場合、この入力は無視されます。",
"人声伴奏分离批量处理, 使用UVR5模型。": "人声と伴奏の分離をバッチ処理で行い、UVR5モデルを使用します。",
"人声提取激进程度": "人声抽出の積極性",
"伴奏人声分离&去混响&去回声": "ボーカル/伴奏の分離と残響の除去",
"使用无参考文本模式时建议使用微调的GPT,听不清参考音频说的啥(不晓得写啥)可以开,开启后无视填写的参考文本。": "参考テキストなしモードを使用する場合は、微調整されたGPTの使用をお勧めします。参考音声が聞き取れない場合(何を書けば良いかわからない場合)は、有効にすると、入力した参考テキストを無視します。",
"保存频率save_every_epoch": "保存頻度save_every_epoch",
"凑50字一切": "50文字ずつカット",
"凑四句一切": "4つの文で埋める",
"切分后文本": "分割後のテキスト",
"切分后的子音频的输出根目录": "分割後のサブオーディオの出力ルートディレクトリ",
"切割使用的进程数": "分割に使用されるプロセス数",
"刷新模型路径": "モデルのパスを更新",
"前端处理后的文本(每句):": "フロントエンド処理後のテキスト(文ごと):",
"去混响/去延迟,附:": "残響除去/遅延除去、附:",
"参考音频在3~10秒范围外,请更换!": "参照音声が3~10秒の範囲外です。別の音声に変更してください!",
"参考音频的文本": "参照オーディオのテキスト",
"参考音频的语种": "参照オーディオの言語",
"合成语音": "推論を開始",
"合格的文件夹路径格式举例: E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华测试样例(去文件管理器地址栏拷就行了)。": "適切なフォルダパスの例: E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华テストサンプル(ファイルマネージャのアドレスバーからコピーしてください)。",
"后续将支持转音素、手工修改音素、语音合成分步执行。": "今後、音素変換、手動音素修正、音声合成の段階的実行をサポートする予定です。",
"填切割后音频所在目录!读取的音频文件完整路径=该目录-拼接-list文件里波形对应的文件名(不是全路径)。如果留空则使用.list文件里的绝对全路径。": "切断後の音声ファイルが格納されているディレクトリを入力してください!読み取り対象の音声ファイルの完全パス = このディレクトリ - 結合 - listファイル内の波形に対応するファイル名(完全パスではありません)。空白の場合、.listファイル内の絶対完全パスを使用します。",
"多语种混合": "多言語混合",
"实际输入的参考文本:": "実際に入力された参照テキスト:",
"实际输入的目标文本(切句后):": "実際に入力された目標テキスト(文分割後):",
"实际输入的目标文本(每句):": "実際に入力された目標テキスト(文ごと):",
"实际输入的目标文本:": "実際に入力された目標テキスト:",
"导出文件格式": "エクスポートファイル形式",
"开启GPT训练": "GPTトレーニングを開始",
"开启SSL提取": "SSL抽出を開始",
"开启SoVITS训练": "SoVITSトレーニングを開始",
"开启一键三连": "ワンクリック三連を開始",
"开启文本获取": "テキストの取得を開始",
"开启无参考文本模式。不填参考文本亦相当于开启。": "参照テキストなしモードを有効にします。参照テキストを入力しない場合も同様に有効になります。",
"开启离线批量ASR": "オフラインバッチASRを開始",
"开启语义token提取": "セマンティックトークン抽出を開始",
"开启语音切割": "音声の分割を開始",
"开启语音降噪": "音声ノイズ除去を有効にする",
"怎么切": "どうやって切るか",
"总训练轮数total_epoch": "総トレーニングエポック数total_epoch",
"总训练轮数total_epoch,不建议太高": "総トレーニングエポック数total_epoch、高すぎないようにお勧めします",
"打标工具WebUI已关闭": "校正ツールWebUIが終了しました",
"打标工具WebUI已开启": "校正ツールWebUIが開始されました",
"打标工具进程输出信息": "アノテーションツールプロセスの出力情報",
"指定输出主人声文件夹": "ボーカルの出力フォルダを指定:",
"指定输出非主人声文件夹": "伴奏の出力フォルダを指定:",
"按中文句号。切": "中国語の句点でカット",
"按标点符号切": "句読点で分割",
"按英文句号.切": "英文のピリオドで切ってください",
"文本切分工具。太长的文本合成出来效果不一定好,所以太长建议先切。合成会根据文本的换行分开合成再拼起来。": "テキストスライサーツール。長文を変換すると効果が不安定になる可能性があるため、長文の場合は事前に切り分けることをお勧めします。推論時には、テキストを個別に推論し、それを組み合わせて再構築します。",
"文本模块学习率权重": "テキストモジュールの学習率の重み",
"文本进程输出信息": "テキストプロセスの出力情報",
"施工中,请静候佳音": "施工中、お待ちください",
"日文": "日本語",
"日英混合": "日英混合",
"是否仅保存最新的ckpt文件以节省硬盘空间": "最新のckptファイルのみを保存してディスクスペースを節約するかどうか",
"是否在每次保存时间点将最终小模型保存至weights文件夹": "各保存時間点で最終的な小さなモデルをweightsフォルダに保存するかどうか",
"是否开启TTS推理WebUI": "TTS推論WebUIを開く",
"是否开启UVR5-WebUI": "UVR5-WebUIをオンにしますか",
"是否开启dpo训练选项(实验性)": "DPOトレーニングオプションを有効にするかどうか(実験的)",
"是否开启打标WebUI": "WebUIを使用したアノテーションを開始しますか",
"是否直接对上次合成结果调整语速。防止随机性。": "直前の合成結果の話速を直接調整して、ランダム性を防ぐか。",
"显卡信息": "グラフィックカード情報",
"本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. <br>如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录<b>LICENSE</b>.": "このソフトウェアはMITライセンスでオープンソース化されており、作者はソフトウェアに対して一切の制御権を持っていません。ソフトウェアを使用する者、ソフトウェアから導出される音声を広める者は、自己責任で行ってください。<br>この条件を認めない場合、ソフトウェアパッケージ内の任意のコードやファイルを使用または引用することはできません。詳細はルートディレクトリの<b>LICENSE</b>を参照してください。",
"模型": "モデル",
"模型分为三类:": "モデルは3種類に分かれています:",
"模型切换": "モデル切り替え",
"每张显卡的batch_size": "各グラフィックカードのバッチサイズ",
"终止ASR进程": "ASRプロセスを停止",
"终止GPT训练": "GPTトレーニングを停止",
"终止SSL提取进程": "SSL抽出プロセスを停止",
"终止SoVITS训练": "SoVITSトレーニングを停止",
"终止一键三连": "ワンクリック三連を停止",
"终止文本获取进程": "テキスト取得プロセスを停止",
"终止语义token提取进程": "セマンティックトークン抽出プロセスを停止",
"终止语音切割": "音声の分割を停止",
"终止语音降噪进程": "音声ノイズ除去プロセスを終了する",
"英文": "英語",
"语义token提取进程输出信息": "セマンティックトークン抽出プロセスの出力情報",
"语速": "話速",
"语速调整,高为更快": "話速調整、高いほど速く",
"语音切割进程输出信息": "音声分割プロセスの出力情報",
"语音降噪进程输出信息": "音声ノイズ除去プロセスの出力情報",
"请上传3~10秒内参考音频,超过会报错!": "3~10秒以内の参照音声をアップロードしてください。それを超えるとエラーが発生します!",
"请输入有效文本": "有効なテキストを入力してください",
"转换": "変換",
"输入待处理音频文件夹路径": "処理するオーディオフォルダのパスを入力してください:",
"输入文件夹路径": "入力フォルダのパス",
"输出logs/实验名目录下应有23456开头的文件和文件夹": "logs/実験名ディレクトリには23456で始まるファイルとフォルダが含まれている必要があります",
"输出信息": "出力情報",
"输出文件夹路径": "出力フォルダのパス",
"输出的语音": "推論結果",
"选择训练完存放在SoVITS_weights和GPT_weights下的模型。默认的一个是底模,体验5秒Zero Shot TTS用。": "SoVITS_weightsおよびGPT_weightsに保存されたモデルを選択します。デフォルトのものはプレトレインであり、ゼロショットTTSを体験できます。",
"降噪结果输出文件夹": "ノイズ除去結果出力フォルダ",
"降噪音频文件输入文件夹": "ノイズ除去音声ファイル入力フォルダ",
"需要合成的切分前文本": "推論が必要な分割前のテキスト",
"需要合成的文本": "推論テキスト",
"需要合成的语种": "推論テキストの言語",
"音频自动切分输入路径,可文件可文件夹": "オーディオの自動分割入力パス、ファイルまたはフォルダを指定できます",
"预训练的GPT模型路径": "事前にトレーニングされたGPTモデルのパス",
"预训练的SSL模型路径": "事前にトレーニングされたSSLモデルのパス",
"预训练的SoVITS-D模型路径": "事前にトレーニングされたSoVITS-Dモデルのパス",
"预训练的SoVITS-G模型路径": "事前にトレーニングされたSoVITS-Gモデルのパス",
"预训练的中文BERT模型路径": "事前にトレーニングされた中文BERTモデルのパス"
}
tools/i18n/locale/ko_KR.json
ADDED
@@ -0,0 +1,172 @@
{
"(1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;": "(1)MDX-Net (onnx_dereverb): 듀얼 채널 리버브에는 가장 적합하지만, 싱글 채널 리버브는 제거할 수 없습니다",
"(234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。": "(234)DeEcho:지연 효과를 제거합니다. Aggressive는 Normal보다 더 철저하게 제거하며, DeReverb는 추가로 리버브를 제거하여 단일 채널 리버브를 제거할 수 있지만 고주파 리버브는 완전히 제거하지 못합니다.",
"*GPT模型列表": "*GPT 모델 목록",
"*SoVITS模型列表": "*SoVITS 모델 목록",
"*实验/模型名": "*실험/모델 이름",
"*文本标注文件": "*텍스트 주석 파일",
"*训练集音频文件目录": "*훈련 세트 오디오 파일 디렉터리",
"*请上传并填写参考信息": "*참고 정보를 업로드하고 입력하십시오",
"*请填写需要合成的目标文本和语种模式": "*합성할 목표 텍스트와 언어 모드를 입력하세요",
".list标注文件的路径": ".list 주석 파일 경로",
"0-前置数据集获取工具": "0-전방 데이터 세트 수집 도구",
"0a-UVR5人声伴奏分离&去混响去延迟工具": "0a-UVR5 보컬 및 반주 분리 및 에코 및 지연 제거 도구",
"0b-语音切分工具": "0b-음성 분리 도구",
"0bb-语音降噪工具": "0bb-음성 노이즈 제거 도구",
"0c-中文批量离线ASR工具": "0c-중국어 대량 오프라인 ASR 도구",
"0d-语音文本校对标注工具": "0d-음성 텍스트 교정 주석 도구",
"1-GPT-SoVITS-TTS": "1-GPT-SoVITS-TTS",
"1A-训练集格式化工具": "1A-훈련 세트 형식 지정 도구",
"1Aa-文本内容": "1Aa-텍스트 내용",
"1Aabc-训练集格式化一键三连": "1Aabc-훈련 세트 형식 지정 일괄 처리",
"1Ab-SSL自监督特征提取": "1Ab-SSL 자기 지도 특징 추출",
"1Ac-语义token提取": "1Ac-의미 토큰 추출",
"1B-微调训练": "1B-미세 조정 훈련",
"1Ba-SoVITS训练。用于分享的模型文件输出在SoVITS_weights下。": "1Ba-SoVITS 훈련. 공유 용 모델 파일은 SoVITS_weights 하위에 출력됩니다.",
"1Bb-GPT训练。用于分享的模型文件输出在GPT_weights下。": "1Bb-GPT 훈련. 공유 용 모델 파일은 GPT_weights 하위에 출력됩니다.",
"1C-推理": "1C-추론",
"1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;": "1. DeEcho-DeReverb 모델의 처리 시간은 다른 두 DeEcho 모델의 거의 두 배입니다;",
"1、保留人声:不带和声的音频选这个,对主人声保留比HP5更好。内置HP2和HP3两个模型,HP3可能轻微漏伴奏但对主人声保留比HP2稍微好一丁点;": "1. 사람 목소리를 유지: 화음이 없는 오디오를 선택하면 HP5보다 사람 목소리를 더 잘 유지할 수 있습니다. 내장된 HP2와 HP3 모델이 있으며, HP3는 화음을 약간 놓칠 수 있지만 HP2보다 사람 목소리를 조금 더 잘 유지합니다;",
"2-GPT-SoVITS-变声": "2-GPT-SoVITS-음성 변환",
"2、MDX-Net-Dereverb模型挺慢的;": "2. MDX-Net-Dereverb 모델은 꽤 느립니다;",
"2、仅保留主人声:带和声的音频选这个,对主人声可能有削弱。内置HP5一个模型;": "2. 주 목소리만 유지: 화음이 있는 오디오에 이 모델을 선택하면 주 목소리가 약해질 수 있습니다. 내장된 HP5 모델이 있습니다;",
"3、个人推荐的最干净的配置是先MDX-Net再DeEcho-Aggressive。": "3. 개인적으로 가장 깨끗한 설정은 먼저 MDX-Net을 사용하고 그 다음에 DeEcho-Aggressive를 사용하는 것입니다;",
"3、去混响、去延迟模型(by FoxJoy):": "3. 잔향 제거 및 지연 제거 모델 (by FoxJoy):",
"ASR 模型": "ASR 모델",
"ASR 模型尺寸": "ASR 모델 크기",
"数据类型精度": "데이터 유형 정밀도",
"ASR 语言设置": "ASR 언어 설정",
"ASR进程输出信息": "ASR 프로세스 출력 정보",
"GPT模型列表": "GPT 모델 목록",
"GPT训练进程输出信息": "GPT 훈련 프로세스 출력 정보",
"GPU卡号,只能填1个整数": "GPU 카드 번호, 1개의 정수만 입력 가능",
"GPU卡号以-分割,每个卡号一个进程": "GPU 카드 번호는 -로 구분되며 각 카드 번호에 하나의 프로세스가 있어야 함",
"SSL进程输出信息": "SSL 프로세스 출력 정보",
"SoVITS模型列表": "SoVITS 모델 목록",
"SoVITS训练进程输出信息": "SoVITS 훈련 프로세스 출력 정보",
"TTS推理WebUI进程输出信息": "TTS 추론 WebUI 프로세스 출력 정보",
"TTS推理进程已关闭": "TTS 추론 프로세스가 닫혔습니다",
"TTS推理进程已开启": "TTS 추론 프로세스가 열렸습니다",
"UVR5已关闭": "UVR5가 비활성화되었습니다",
"UVR5已开启": "UVR5가 활성화되었습니다",
"UVR5进程输出信息": "UVR5 프로세스 출력 정보",
"alpha_mix:混多少比例归一化后音频进来": "알파 믹스: 정규화된 오디오가 들어오는 비율",
"gpt采样参数(无参考文本时不要太低。不懂就用默认):": "GPT 샘플링 매개변수 (참조 텍스트가 없을 때 너무 낮게 설정하지 마십시오. 확실하지 않으면 기본값을 사용하십시오):",
"hop_size:怎么算音量曲线,越小精度越大计算量越高(不是精度越大效果越好)": "hop 크기: 볼륨 곡선을 계산하는 방법. 작을수록 정확도가 높아지지만 계산량이 높아집니다 (정확도가 높다고 효과가 좋아지지 않음)",
"max:归一化后最大值多少": "최대 값 (정규화 후)",
"max_sil_kept:切完后静音最多留多长": "최대 유지되는 정적 길이 (분리 후)",
"min_interval:最短切割间隔": "최소 분리 간격",
"min_length:每段最小多长,如果第一段太短一直和后面段连起来直到超过这个值": "min_length:각 부분의 최소 길이, 첫 번째 부분이 너무 짧으면 다음 부분과 계속 연결하여 이 값을 초과할 때까지",
"temperature": "온도",
"threshold:音量小于这个值视作静音的备选切割点": "임계 값: 이 값보다 작은 볼륨은 대체 분리 지점으로 간주됩니다.",
"top_k": "top_k",
"top_p": "top_p",
"一键三连进程输出信息": "일괄 처리 프로세스 출력 정보",
"不切": "자르지 않음",
"中文": "중국어",
"中文教程文档:https://www.yuque.com/baicaigongchang1145haoyuangong/ib3g1e": "중국어 튜토리얼 문서:https://www.yuque.com/baicaigongchang1145haoyuangong/ib3g1e",
"中英混合": "중영 혼합",
"也可批量输入音频文件, 二选一, 优先读文件夹": "오디오 파일을 일괄로 입력할 수도 있습니다. 둘 중 하나를 선택하고 폴더를 읽기를 우선합니다.",
"人声伴奏分离批量处理, 使用UVR5模型。": "보컬과 반주 분리 배치 처리, UVR5 모델 사용.",
"人声提取激进程度": "보컬 추출의 공격성",
"伴奏人声分离&去混响&去回声": "반주 및 보컬 분리 & 리버브 제거 & 에코 제거",
"使用无参考文本模式时建议使用微调的GPT,听不清参考音频说的啥(不晓得写啥)可以开,开启后无视填写的参考文本。": "참고 텍스트가 없을 때는 미세 조정된 GPT를 사용하는 것이 좋습니다. 참고 오디오에서 무엇을 말하는지 잘 들리지 않으면 이 모드를 켜서 입력한 참고 텍스트를 무시할 수 있습니다.",
"保存频率save_every_epoch": "저장 빈도 (각 라운드마다)",
"凑50字一切": "50자를 채우십시오",
"凑四句一切": "네 문장의 세트를 완성하세요.",
"切分后文本": "분리된 텍스트",
"切分后的子音频的输出根目录": "분리된 하위 오디오의 출력 기본 디렉터리",
"切割使用的进程数": "사용되는 프로세스 수로 자르기",
"刷新模型路径": "모델 경로 새로 고침",
"前端处理后的文本(每句):": "프론트엔드 처리 후 텍스트(문장별):",
"去混响/去延迟,附:": "리버브 제거/지연 제거, 부록:",
"参考音频在3~10秒范围外,请更换!": "참고 오디오가 3~10초 범위를 벗어났습니다. 다른 것으로 바꾸십시오!",
"参考音频的文本": "참고 오디오의 텍스트",
"参考音频的语种": "참고 오디오의 언어",
"合成语音": "합성 음성",
"合格的文件夹路径格式举例: E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华测试样例(去文件管理器地址栏拷就行了)。": "적절한 폴더 경로 형식 예: E:\\codes\\py39\\vits_vc_gpu\\백로서리 테스트 샘플 (파일 관리자 주소 표시줄에서 복사하면 됩니다).",
"后续将支持转音素、手工修改音素、语音合成分步执行。": "앞으로 음소 변환, 음소 수동 수정, 음성 합성 단계별 실행을 지원할 예정입니다.",
"填切割后音频所在目录!读取的音频文件完整路径=该目录-拼接-list文件里波形对应的文件名(不是全路径)。如果留空则使用.list文件里的绝对全路径。": "분리된 오디오가 위치한 디렉터리를 입력하세요! 읽어들인 오디오 파일의 전체 경로 = 이 디렉터리 - list 파일에서 파형에 해당하는 파일명(전체 경로가 아님). 비워 두면 .list 파일의 절대 전체 경로를 사용합니다.",
"多语种混合": "다국어 혼합",
"实际输入的参考文本:": "실제 입력된 참고 텍스트:",
"实际输入的目标文本(切句后):": "실제 입력된 목표 텍스트(문장 분리 후):",
"实际输入的目标文本(每句):": "실제 입력된 목표 텍스트(문장별):",
"实际输入的目标文本:": "실제 입력된 목표 텍스트:",
"导出文件格式": "내보내기 파일 형식",
"开启GPT训练": "GPT 훈련 활성화",
"开启SSL提取": "SSL 추출 활성화",
"开启SoVITS训练": "SoVITS 훈련 활성화",
"开启一键三连": "일괄 처리 활성화",
"开启文本获取": "텍스트 추출 활성화",
"开启无参考文本模式。不填参考文本亦相当于开启。": "참고 텍스트 없이 모드를 활성화합니다. 참고 텍스트를 입력하지 않으면 자동으로 활성화됩니다.",
"开启离线批量ASR": "오프라인 대량 ASR 활성화",
"开启语义token提取": "의미 토큰 추출 활성화",
"开启语音切割": "음성 분리 활성화",
"开启语音降噪": "음성 노이즈 제거 활성화",
"怎么切": "자르기 옵션",
"总训练轮数total_epoch": "총 훈련 라운드 수 (total_epoch)",
"总训练轮数total_epoch,不建议太高": "총 훈련 라운드 수 (total_epoch), 너무 높지 않게 권장됨",
"打标工具WebUI已关闭": "주석 도구 WebUI가 닫혔습니다",
"打标工具WebUI已开启": "주석 도구 WebUI가 열렸습니다",
"打标工具进程输出信息": "주석 도구 프로세스 출력 정보",
"指定输出主人声文件夹": "지정된 주인 목소리 출력 폴더",
"指定输出非主人声文件夹": "지정된 비주인 목소리 출력 폴더",
"按中文句号。切": "중국어 문장으로 분리하십시오.",
"按标点符号切": "구두점을 기준으로 자르기",
"按英文句号.切": "영어 문장으로 분리하기",
"文本切分工具。太长的文本合成出来效果不一定好,所以太长建议先切。合成会根据文本的换行分开合成再拼起来。": "텍스트 분리 도구. 너무 긴 텍스트는 합성 결과가 항상 좋지 않을 수 있으므로 너무 길면 먼저 분리하는 것이 좋습니다. 합성은 텍스트 줄 바꿈을 기준으로 분리되어 다시 조합됩니다.",
"文本模块学习率权重": "텍스트 모듈 학습률 가중치",
"文本进程输出信息": "텍스트 프로세스 출력 정보",
"施工中,请静候佳音": "공사 중입니다. 기다려주십시오.",
"日文": "일본어",
"日英混合": "일본어와 영어 혼합",
"是否仅保存最新的ckpt文件以节省硬盘空间": "디스크 공간을 절약하기 위해 최신 ckpt 파일만 저장할지 여부",
"是否在每次保存时间点将最终小模型保存至weights文件夹": "각 저장 시간에 최종 작은 모델을 weights 폴더에 저장할지 여부",
"是否开启TTS推理WebUI": "TTS 추론 WebUI 활성화 여부",
"是否开启UVR5-WebUI": "UVR5-WebUI를 여시겠습니까?",
"是否开启dpo训练选项(实验性)": "dpo 훈련 옵션(실험적) 활성화 여부",
"是否开启打标WebUI": "웹 기반 주석 활성화 여부",
"是否直接对上次合成结果调整语速。防止随机性。": "직전 합성 결과의 언어 속도를 직접 조정하여 무작위성을 방지할까요?",
"显卡信息": "그래픽 카드 정보",
"本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. <br>如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录<b>LICENSE</b>.": "본 소프트웨어는 MIT 라이선스로 오픈 소스로 제공되며, 제작자는 소프트웨어에 대해 어떠한 제어력도 가지지 않습니다. 소프트웨어 사용자 및 소프트웨어에서 내보낸 소리를 전파하는 자는 전적으로 책임져야 합니다. <br>이 조항을 인정하지 않으면 소프트웨어의 코드 및 파일을 사용하거나 인용할 수 없습니다. 루트 디렉터리의 <b>LICENSE</b>를 참조하십시오.",
"模型": "모델",
"模型分为三类:": "모델은 3가지로 나뉩니다:",
"模型切换": "모델 전환",
"每张显卡的batch_size": "각 그래픽 카드의 배치 크기",
"终止ASR进程": "ASR 프로세스 종료",
"终止GPT训练": "GPT 훈련 종료",
"终止SSL提取进程": "SSL 추출 프로세스 종료",
"终止SoVITS训练": "SoVITS 훈련 종료",
"终止一键三连": "일괄 처리 종료",
"终止文本获取进程": "텍스트 추출 프로세스 종료",
"终止语义token提取进程": "의미 토큰 추출 프로세스 종료",
"终止语音切割": "음성 분리 종료",
"终止语音降噪进程": "음성 노이즈 제거 프로세스 종료",
"英文": "영어",
"语义token提取进程输出信息": "의미 토큰 추출 프로세스 출력 정보",
"语速": "언어 속도",
"语速调整,高为更快": "언어 속도 조정, 높을수록 빠름",
"语音切割进程输出信息": "음성 분리 프로세스 출력 정보",
"语音降噪进程输出信息": "음성 노이즈 제거 프로세스 출력 정보",
"请上传3~10秒内参考音频,超过会报错!": "3~10초 이내의 참고 오디오를 업로드하십시오. 초과하면 오류가 발생합니다!",
"请输入有效文本": "유효한 텍스트를 입력하세요",
"转换": "변환",
"输入待处理音频文件夹路径": "처리 대기 중인 오디오 폴더 경로 입력",
"输入文件夹路径": "폴더 경로 입력",
"输出logs/实验名目录下应有23456开头的文件和文件夹": "logs/실험 이름 디렉터리에는 23456으로 시작하는 파일과 폴더가 있어야 함",
"输出信息": "출력 정보",
"输出文件夹路径": "출력 폴더 경로",
"输出的语音": "출력 음성",
"选择训练完存放在SoVITS_weights和GPT_weights下的模型。默认的一个是底模,体验5秒Zero Shot TTS用。": "SoVITS_weights 및 GPT_weights에 저장된 훈련 완료된 모델 중 선택. 기본적으로 하나는 기본 모델이며 5초 Zero Shot TTS를 체험할 수 있습니다.",
"降噪结果输出文件夹": "노이즈 제거 결과 출력 폴더",
"降噪音频文件输入文件夹": "노이즈 제거 오디오 파일 입력 폴더",
"需要合成的切分前文本": "합성해야 할 분할 전 텍스트",
"需要合成的文本": "합성해야 할 텍스트",
"需要合成的语种": "합성해야 할 언어",
"音频自动切分输入路径,可文件可文件夹": "오디오 자동 분리 입력 경로, 파일 또는 폴더 가능",
"预训练的GPT模型路径": "사전 훈련된 GPT 모델 경로",
"预训练的SSL模型路径": "사전 훈련된 SSL 모델 경로",
"预训练的SoVITS-D模型路径": "사전 훈련된 SoVITS-D 모델 경로",
"预训练的SoVITS-G模型路径": "사전 훈련된 SoVITS-G 모델 경로",
"预训练的中文BERT模型路径": "사전 훈련된 중국어 BERT 모델 경로"
}
tools/i18n/locale/pt_BR.json ADDED
@@ -0,0 +1,172 @@
{
"(1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;": "(1)MDX-Net (onnx_dereverb): É a melhor opção para reverberação de dois canais, mas não pode remover a reverberação de um único canal;",
"(234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。": "(234)DeEcho:Remove os efeitos de atraso. Aggressive é mais completo que Normal na remoção, DeReverb remove adicionalmente a reverberação, pode remover a reverberação de um canal único, mas não remove completamente a reverberação de placa de alta frequência.",
"*GPT模型列表": "*Lista de modelos GPT",
"*SoVITS模型列表": "*Lista de modelos Sovits",
"*实验/模型名": "*Nome do experimento/modelo",
"*文本标注文件": "*Arquivo de marcação de texto",
"*训练集音频文件目录": "*Diretório de arquivos de áudio do conjunto de treinamento",
"*请上传并填写参考信息": "Por favor, faça o upload e preencha as informações de referência",
"*请填写需要合成的目标文本和语种模式": "*Por favor, insira o texto alvo a ser sintetizado e o modo de idioma.",
".list标注文件的路径": "Caminho do arquivo de anotação .list",
"0-前置数据集获取工具": "0- Ferramenta de aquisição de conjunto de dados pré-frontal",
"0a-UVR5人声伴奏分离&去混响去延迟工具": "0A-UVR5 separação de voz e acompanhamento instrumental & ferramenta para remover reverberação e atraso",
"0b-语音切分工具": "0b- Ferramenta de corte de voz",
"0bb-语音降噪工具": "0bb- Ferramenta de redução de ruído de voz",
"0c-中文批量离线ASR工具": "0c- Ferramenta chinesa de ASR offline em lote",
"0d-语音文本校对标注工具": "0d- Ferramenta de correção e marcação de texto de voz",
"1-GPT-SoVITS-TTS": "1-GPT-SOVITS-TTS",
"1A-训练集格式化工具": "1A-Ferramenta de formatação de conjunto de dados de treinamento",
"1Aa-文本内容": "1AA-Conteúdo do texto",
"1Aabc-训练集格式化一键三连": "1AABC-Formatação de conjunto de treinamento em um clique",
"1Ab-SSL自监督特征提取": "1AB-Extração de características auto-supervisionadas SSL",
"1Ac-语义token提取": "1AC-Extração de token semântico",
"1B-微调训练": "1B-Treinamento de ajuste fino",
"1Ba-SoVITS训练。用于分享的模型文件输出在SoVITS_weights下。": "1ba-Treinamento SoVITS. O arquivo de modelo para compartilhamento é gerado em SOVITS_WEIGHTS",
"1Bb-GPT训练。用于分享的模型文件输出在GPT_weights下。": "1BB-Treinamento GPT. O arquivo de modelo para compartilhamento é gerado em GPT_WEIGHTS",
"1C-推理": "1C-raciocínio",
"1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;": "1. O tempo de processamento do modelo DeEcho-DeReverb é quase o dobro dos outros dois modelos DeEcho;",
"1、保留人声:不带和声的音频选这个,对主人声保留比HP5更好。内置HP2和HP3两个模型,HP3可能轻微漏伴奏但对主人声保留比HP2稍微好一丁点;": "1. Manter a voz: selecione isso para áudio sem harmonia, que preserva melhor a voz principal do que o HP5. Inclui dois modelos, HP2 e HP3; o HP3 pode permitir um pequeno vazamento de acompanhamento, mas preserva a voz principal um pouco melhor do que o HP2;",
"2-GPT-SoVITS-变声": "2-gpt-sovits-mudança de voz",
"2、MDX-Net-Dereverb模型挺慢的;": "2. O modelo MDX-Net-Dereverb é bastante lento;",
"2、仅保留主人声:带和声的音频选这个,对主人声可能有削弱。内置HP5一个模型;": "2. Manter apenas a voz principal: selecione isso para áudio com harmonia, pode haver uma redução na voz principal. Inclui um modelo HP5;",
"3、个人推荐的最干净的配置是先MDX-Net再DeEcho-Aggressive。": "3. A configuração mais limpa recomendada é usar primeiro o MDX-Net e depois o DeEcho-Aggressive.",
"3、去混响、去延迟模型(by FoxJoy):": "3. Modelo de remoção de reverberação e atraso (por FoxJoy):",
"ASR 模型": "Modelo ASR",
"ASR 模型尺寸": "Tamanho do modelo ASR",
"数据类型精度": "precisão do tipo de dado",
"ASR 语言设置": "Configurações de idioma do ASR",
"ASR进程输出信息": "Informações de saída do processo ASR",
"GPT模型列表": "Lista de modelos GPT",
"GPT训练进程输出信息": "Informações de saída do processo de treinamento GPT",
"GPU卡号,只能填1个整数": "Número da placa de vídeo, só é possível preencher com um número inteiro",
"GPU卡号以-分割,每个卡号一个进程": "Número da placa de vídeo dividido por-, cada número de placa é um processo",
"SSL进程输出信息": "Informações de saída do processo SSL",
"SoVITS模型列表": "Lista de modelos SoVITS",
"SoVITS训练进程输出信息": "Informações de saída do processo de treinamento SoVITS",
"TTS推理WebUI进程输出信息": "Informações de saída do processo webui de raciocínio TTS",
"TTS推理进程已关闭": "O processo de inferência TTS foi desativado",
"TTS推理进程已开启": "O processo de inferência TTS foi iniciado",
"UVR5已关闭": "UVR5 está desativado",
"UVR5已开启": "UVR5 está ativado",
"UVR5进程输出信息": "Informações de saída do processo UVR5",
"alpha_mix:混多少比例归一化后音频进来": "alpha_mix: Em que proporção o áudio normalizado é misturado de volta",
"gpt采样参数(无参考文本时不要太低。不懂就用默认):": "Parâmetros de amostragem do GPT (não muito baixos quando não houver texto de referência. Use o padrão se não tiver certeza):",
"hop_size:怎么算音量曲线,越小精度越大计算量越高(不是精度越大效果越好)": "HOP_SIZE: Como calcular a curva de volume, quanto menor a precisão, maior a quantidade de cálculos (não significa que quanto maior a precisão, melhor o efeito)",
"max:归一化后最大值多少": "MAX: Qual é o valor máximo após a normalização?",
"max_sil_kept:切完后静音最多留多长": "max_sil_kept: Depois de cortar, por quanto tempo no máximo o silêncio é mantido",
"min_interval:最短切割间隔": "min_interval: O intervalo de corte mínimo",
"min_length:每段最小多长,如果第一段太短一直和后面段连起来直到超过这个值": "min_length: Comprimento mínimo de cada segmento. Se o primeiro segmento for muito curto, ele será unido aos segmentos seguintes até exceder este valor",
"temperature": "temperatura",
"threshold:音量小于这个值视作静音的备选切割点": "Limiar: O volume menor que este valor é considerado como um ponto de corte mudo alternativo",
"top_k": "top_k",
"top_p": "top_p",
"一键三连进程输出信息": "Informações de saída do processo de um clique",
"不切": "Não dividir",
"中文": "Chinês",
"中文教程文档:https://www.yuque.com/baicaigongchang1145haoyuangong/ib3g1e": "Documentação do tutorial em chinês:https://www.yuque.com/baicaigongchang1145haoyuangong/ib3g1e",
"中英混合": "Mistura de Chinês e Inglês",
"也可批量输入音频文件, 二选一, 优先读文件夹": "Também é possível inserir arquivos de áudio em lote; escolha uma opção, preferencialmente leia a pasta.",
"人声伴奏分离批量处理, 使用UVR5模型。": "Processamento em lote de separação de voz e acompanhamento, usando o modelo UVR5.",
"人声提取激进程度": "Grau de agressividade da extração de voz",
"伴奏人声分离&去混响&去回声": "Separação de acompanhamento e voz & remoção de reverberação & remoção de eco",
"使用无参考文本模式时建议使用微调的GPT,听不清参考音频说的啥(不晓得写啥)可以开,开启后无视填写的参考文本。": "Ao usar o modo sem texto de referência, recomenda-se usar um GPT ajustado. Se não conseguir ouvir claramente o áudio de referência (não sabe o que escrever), você pode ativar o modo e ignorar o texto de referência fornecido.",
"保存频率save_every_epoch": "Frequência de salvamento save_every_epoch",
"凑50字一切": "Complete com 50 caracteres",
"凑四句一切": "Complete com quatro frases",
"切分后文本": "Texto após divisão",
"切分后的子音频的输出根目录": "Diretório raiz de saída do sub-áudio após o corte",
"切割使用的进程数": "Número de processos para corte",
"刷新模型路径": "Atualizar caminho do modelo",
"前端处理后的文本(每句):": "Texto após processamento front-end (por frase):",
"去混响/去延迟,附:": "Remoção de reverberação/remoção de atraso, anexo:",
"参考音频在3~10秒范围外,请更换!": "O áudio de referência está fora do intervalo de 3 a 10 segundos. Por favor, substitua!",
"参考音频的文本": "Texto do áudio de referência",
"参考音频的语种": "Idioma do áudio de referência",
"合成语音": "Voz sintetizada",
"合格的文件夹路径格式举例: E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华测试样例(去文件管理器地址栏拷就行了)。": "Exemplo de formato de caminho de pasta válido: E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华测试样例 (copie do endereço da barra do gerenciador de arquivos).",
"后续将支持转音素、手工修改音素、语音合成分步执行。": "Futuramente, serão suportadas a conversão de fonemas, a modificação manual de fonemas e a execução em etapas da síntese de voz.",
"填切割后音频所在目录!读取的音频文件完整路径=该目录-拼接-list文件里波形对应的文件名(不是全路径)。如果留空则使用.list文件里的绝对全路径。": "Preencha o diretório onde os áudios cortados estão localizados! O caminho completo dos arquivos de áudio lidos = este diretório - concatenação com o nome do arquivo de forma correspondente no arquivo .list (não o caminho completo). Se deixar em branco, use o caminho absoluto no arquivo .list.",
"多语种混合": "Mistura de múltiplos idiomas",
"实际输入的参考文本:": "Texto de referência realmente inserido:",
"实际输入的目标文本(切句后):": "Texto alvo realmente inserido (após divisão de frases):",
"实际输入的目标文本(每句):": "Texto alvo realmente inserido (por frase):",
"实际输入的目标文本:": "Texto alvo realmente inserido:",
"导出文件格式": "Formato de arquivo de exportação",
"开启GPT训练": "Ativar treinamento GPT",
"开启SSL提取": "Ativar extração SSL",
"开启SoVITS训练": "Ativar treinamento SoVITS",
"开启一键三连": "Ativar um clique",
"开启文本获取": "Ativar obtenção de texto",
"开启无参考文本模式。不填参考文本亦相当于开启。": "Ativar o modo sem texto de referência. Não preencher o texto de referência também equivale a ativar.",
"开启离线批量ASR": "Ativar ASR offline em lote",
"开启语义token提取": "Ativar extração de token semântico",
"开启语音切割": "Ativar corte de voz",
"开启语音降噪": "Ativar redução de ruído de voz",
"怎么切": "Como cortar",
"总训练轮数total_epoch": "Total de epoch de treinamento",
"总训练轮数total_epoch,不建议太高": "Total de epoch de treinamento, não é recomendável um valor muito alto",
"打标工具WebUI已关闭": "A ferramenta de marcação WebUI foi desativado",
"打标工具WebUI已开启": "A ferramenta de marcação WebUI está ativada",
"打标工具进程输出信息": "Informações de saída do processo da ferramenta de marcação",
"指定输出主人声文件夹": "Especificar a pasta de saída da voz principal",
"指定输出非主人声文件夹": "Especificar a pasta de saída da voz secundária",
"按中文句号。切": "Dividir por ponto final chinês",
"按标点符号切": "Dividir por sinais de pontuação",
"按英文句号.切": "Dividir por ponto final em inglês",
"文本切分工具。太长的文本合成出来效果不一定好,所以太长建议先切。合成会根据文本的换行分开合成再拼起来。": "Ferramenta de divisão de texto. Textos muito longos podem não ter bons resultados de síntese, então é recomendado dividir textos muito longos primeiro. A síntese será feita dividindo e recombinando com base nas quebras de linha do texto.",
"文本模块学习率权重": "Weight da taxa de aprendizado do módulo de texto",
"文本进程输出信息": "Informações de saída do processo de texto",
"施工中,请静候佳音": "Em construção, por favor, aguarde por um bom som",
"日文": "Japonês",
"日英混合": "Mistura de Japonês e Inglês",
"是否仅保存最新的ckpt文件以节省硬盘空间": "Se deve salvar apenas o último arquivo CKPT para economizar espaço em disco",
"是否在每次保存时间点将最终小模型保存至weights文件夹": "Se deve salvar o modelo pequeno final na pasta Weights em cada ponto de salvamento de tempo",
"是否开启TTS推理WebUI": "Se deseja ativar o webui de raciocínio TTS",
"是否开启UVR5-WebUI": "Se deseja ativar a UVR5-WEBUI",
"是否开启dpo训练选项(实验性)": "Se deseja ativar a opção de treinamento DPO (experimental)",
"是否开启打标WebUI": "Se deseja abrir o webui de marcação",
"是否直接对上次合成结果调整语速。防止随机性。": "Se deve ajustar diretamente a velocidade da fala do último resultado de síntese para evitar aleatoriedade.",
"显卡信息": "Informações da placa de vídeo",
"本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. <br>如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录<b>LICENSE</b>.": "Este software é de código aberto sob a licença MIT. O autor não tem controle sobre o software. Aqueles que usam o software e difundem os sons exportados pelo software são totalmente responsáveis. <br>Se você não concorda com esta cláusula, não pode usar ou citar nenhum código e arquivo dentro do pacote de software. Consulte o diretório raiz <b>LICENSE</b> para mais detalhes.<br><br> Traduzido por Rafael Godoy Ebert",
"模型": "Modelo",
"模型分为三类:": "Modelos dividem-se em três categorias:",
"模型切换": "Troca de modelo",
"每张显卡的batch_size": "Tamanho do lote de cada placa de vídeo",
"终止ASR进程": "Encerrar processo ASR",
"终止GPT训练": "Encerrar treinamento GPT",
"终止SSL提取进程": "Encerrar processo de extração SSL",
"终止SoVITS训练": "Encerrar treinamento SoVITS",
"终止一键三连": "Encerrar um clique",
"终止文本获取进程": "Encerrar processo de obtenção de texto",
"终止语义token提取进程": "Encerrar processo de extração de token semântico",
"终止语音切割": "Encerrar corte de voz",
"终止语音降噪进程": "Encerrar o processo de redução de ruído de voz",
"英文": "Inglês",
"语义token提取进程输出信息": "Informações de saída do processo de extração de token semântico",
"语速": "Velocidade da fala",
"语速调整,高为更快": "Ajustar a velocidade da fala, mais alta para mais rápido",
"语音切割进程输出信息": "Informações de saída do processo de corte de voz",
"语音降噪进程输出信息": "Informações de saída do processo de redução de ruído de voz",
"请上传3~10秒内参考音频,超过会报错!": "Por favor, faça upload de um áudio de referência com duração entre 3 e 10 segundos. Áudios fora dessa faixa causarão erro!",
"请输入有效文本": "Por favor, insira um texto válido",
"转换": "Converter",
"输入待处理音频文件夹路径": "Caminho da pasta de arquivos de áudio a ser processados",
"输入文件夹路径": "Caminho da pasta de entrada",
"输出logs/实验名目录下应有23456开头的文件和文件夹": "Logs de saída/deve haver arquivos e pastas começando com 23456 no diretório do nome do experimento",
"输出信息": "Informações de saída",
"输出文件夹路径": "Caminho da pasta de saída",
"输出的语音": "Áudio de saída",
"选择训练完存放在SoVITS_weights和GPT_weights下的模型。默认的一个是底模,体验5秒Zero Shot TTS用。": "Selecione os modelos armazenados em Sovits_weights e GPT_WEIGHTS. O padrão é o modelo inferior, experiência para 5 segundos de Zero Shot TTS",
"降噪结果输出文件夹": "Pasta de saída dos resultados de redução de ruído",
"降噪音频文件输入文件夹": "Pasta de entrada dos arquivos de áudio para redução de ruído",
"需要合成的切分前文本": "Texto a ser sintetizado antes da divisão",
"需要合成的文本": "Texto a ser sintetizado",
"需要合成的语种": "Idioma a ser sintetizado",
"音频自动切分输入路径,可文件可文件夹": "Caminho de entrada automático de corte de áudio, pode ser um arquivo ou uma pasta",
"预训练的GPT模型路径": "Caminho do modelo GPT pre-train",
"预训练的SSL模型路径": "Caminho do modelo SSL pre-train",
"预训练的SoVITS-D模型路径": "Caminho do modelo SoVITS-D pre-train",
"预训练的SoVITS-G模型路径": "Caminho do modelo SoVITS-G pre-train",
"预训练的中文BERT模型路径": "Caminho do modelo BERT chinês pre-train"
}
tools/i18n/locale/ru_RU.json ADDED
@@ -0,0 +1,172 @@
{
"(1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;": "(1)MDX-Net(onnx_dereverb):Это лучший выбор для реверберации с двумя каналами, но он не может устранить реверберацию с одним каналом;",
"(234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。": "(234)DeEcho:Устраняет эффект задержки. Aggressive устраняет более тщательно, чем Normal, DeReverb дополнительно устраняет реверберацию, может устранить реверберацию с одного канала, но не полностью устраняет высокочастотную реверберацию.",
"*GPT模型列表": "*Список моделей GPT",
"*SoVITS模型列表": "*Список моделей SoVITS",
"*实验/模型名": "*Название эксперимента/модели",
"*文本标注文件": "*Файл текстовой аннотации",
"*训练集音频文件目录": "*Директория аудиофайлов обучающего набора",
"*请上传并填写参考信息": "*Пожалуйста, загрузите и заполните референтные данные",
"*请填写需要合成的目标文本和语种模式": "*Пожалуйста, введите целевой текст для синтеза и режим языка",
".list标注文件的路径": "Путь к файлу аннотации .list",
"0-前置数据集获取工具": "0-Инструмент для получения предварительного набора данных",
"0a-UVR5人声伴奏分离&去混响去延迟工具": "0a-Инструмент для разделения вокала и аккомпанемента UVR5 & устранения реверберации и задержек",
"0b-语音切分工具": "0b-Инструмент для разделения речи",
"0bb-语音降噪工具": "0bb-Инструмент для подавления шумов в голосе",
"0c-中文批量离线ASR工具": "0c-Инструмент для пакетной офлайн ASR на китайском",
"0d-语音文本校对标注工具": "0d-Инструмент для коррекции и аннотации текста речи",
"1-GPT-SoVITS-TTS": "1-GPT-SoVITS-TTS",
"1A-训练集格式化工具": "1A-Инструмент для форматирования обучающего набора",
"1Aa-文本内容": "1Aa-Содержание текста",
"1Aabc-训练集格式化一键三连": "1Aabc-Форматирование обучающего набора одним нажатием",
"1Ab-SSL自监督特征提取": "1Ab-Самоконтролируемое извлечение признаков SSL",
"1Ac-语义token提取": "1Ac-Извлечение семантических токенов",
"1B-微调训练": "1B-Дообучение",
"1Ba-SoVITS训练。用于分享的模型文件输出在SoVITS_weights下。": "1Ba-Обучение SoVITS. Файлы моделей для распространения находятся в SoVITS_weights.",
"1Bb-GPT训练。用于分享的模型文件输出在GPT_weights下。": "1Bb-Обучение GPT. Файлы моделей для распространения находятся в GPT_weights.",
"1C-推理": "1C-Инференс",
"1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;": "1. Время обработки модели DeEcho-DeReverb почти вдвое больше, чем у двух других моделей DeEcho;",
"1、保留人声:不带和声的音频选这个,对主人声保留比HP5更好。内置HP2和HP3两个模型,HP3可能轻微漏伴奏但对主人声保留比HP2稍微好一丁点;": "1. Сохранение голоса: выберите этот для аудио без гармоний, сохранение голоса будет лучше, чем HP5. Встроенные модели HP2 и HP3, HP3 может немного пропускать сопровождение, но сохраняет голос немного лучше, чем HP2;",
"2-GPT-SoVITS-变声": "2-GPT-SoVITS-переозвучивание",
"2、MDX-Net-Dereverb模型挺慢的;": "2. Модель MDX-Net-Dereverb довольно медленная;",
"2、仅保留主人声:带和声的音频选这个,对主人声可能有削弱。内置HP5一个模型;": "2. Сохранение только основного голоса: выберите это для аудио с гармониями, может ослабить основной голос. Встроенная модель HP5;",
"3、个人推荐的最干净的配置是先MDX-Net再DeEcho-Aggressive。": "3. Лично рекомендованная самая чистая конфигурация — сначала MDX-Net, затем DeEcho-Aggressive.",
"3、去混响、去延迟模型(by FoxJoy):": "3. Модель удаления реверберации и задержек (от FoxJoy):",
"ASR 模型": "Модель ASR",
"ASR 模型尺寸": "Размер модели ASR",
"数据类型精度": "точность типа данных",
"ASR 语言设置": "Настройки языка ASR",
"ASR进程输出信息": "Информация о процессе ASR",
"GPT模型列表": "Список моделей GPT",
"GPT训练进程输出信息": "Информация о процессе обучения GPT",
"GPU卡号,只能填1个整数": "Номер GPU, можно указать только одно целое число",
"GPU卡号以-分割,每个卡号一个进程": "Номера GPU разделяются дефисом, на каждый номер отдельный процесс",
"SSL进程输出信息": "Информация о процессе SSL",
"SoVITS模型列表": "Список моделей SoVITS",
"SoVITS训练进程输出信息": "Информация о процессе обучения SoVITS",
"TTS推理WebUI进程输出信息": "Информация о процессе TTS инференса WebUI",
"TTS推理进程已关闭": "Процесс TTS-инференции остановлен",
"TTS推理进程已开启": "Процесс TTS-инференции запущен",
"UVR5已关闭": "UVR5 выключен",
"UVR5已开启": "UVR5 включен",
"UVR5进程输出信息": "Вывод информации процесса UVR5",
"alpha_mix:混多少比例归一化后音频进来": "alpha_mix:Какая доля нормализованного аудио смешивается",
"gpt采样参数(无参考文本时不要太低。不懂就用默认):": "Параметры выборки GPT (не устанавливайте слишком низкие значения, если нет ссылочного текста. Используйте значения по умолчанию, если не уверены):",
"hop_size:怎么算音量曲线,越小精度越大计算量越高(不是精度越大效果越好)": "hop_size:Как рассчитывается кривая громкости, чем меньше, тем выше точность и больше вычислительная нагрузка (большая точность не всегда означает лучший результат)",
"max:归一化后最大值多少": "max:Максимальное значение после нормализации",
"max_sil_kept:切完后静音最多留多长": "max_sil_kept:Максимальная длительность тишины после разреза",
"min_interval:最短切割间隔": "min_interval:Минимальный интервал разреза",
"min_length:每段最小多长,如果第一段太短一直和后面段连起来直到超过这个值": "min_length:Минимальная длина каждого отрезка; если первый отрезок слишком короткий, он будет соединен с последующими до достижения этого значения",
"temperature": "temperature",
"threshold:音量小于这个值视作静音的备选切割点": "threshold:Значение громкости ниже этого считается тишиной для альтернативной точки разреза",
"top_k": "top_k",
"top_p": "top_p",
"一键三连进程输出信息": "Информация о процессе одного нажатия",
"不切": "Не разрезать",
"中文": "Китайский",
"中文教程文档:https://www.yuque.com/baicaigongchang1145haoyuangong/ib3g1e": "Документация на китайском языке:https://www.yuque.com/baicaigongchang1145haoyuangong/ib3g1e",
"中英混合": "Китайский и английский",
"也可批量输入音频文件, 二选一, 优先读文件夹": "Можно также импортировать несколько аудиофайлов. Если путь к папке существует, то этот ввод игнорируется.",
"人声伴奏分离批量处理, 使用UVR5模型。": "Обработка разделения вокала и аккомпанемента пакетно с использованием модели UVR5.",
"人声提取激进程度": "Степень агрессивности извлечения вокала",
"伴奏人声分离&去混响&去回声": "Разделение вокала/аккомпанемента и удаление эхо",
"使用无参考文本模式时建议使用微调的GPT,听不清参考音频说的啥(不晓得写啥)可以开,开启后无视填写的参考文本。": "При использовании режима без референсного текста рекомендуется использовать настроенную модель GPT. Если не удается разобрать, что говорит референсное аудио (не знаете, что писать), можете включить этот режим, и он проигнорирует введенный референсный текст.",
"保存频率save_every_epoch": "Частота сохранения save_every_epoch",
"凑50字一切": "Соберите все в 50 символов",
"凑四句一切": "Собрать четыре предложения и разрезать",
"切分后文本": "Текст после разделения",
"切分后的子音频的输出根目录": "Корневой каталог вывода для подаудио после разделения",
"切割使用的进程数": "Количество процессов, используемых для разрезания",
"刷新模型路径": "Обновить путь к модели",
"前端处理后的文本(每句):": "Текст после предварительной обработки (каждое предложение):",
"去混响/去延迟,附:": "Удаление реверберации/удаление задержки, примечание:",
"参考音频在3~10秒范围外,请更换!": "Референтное аудио вне диапазона 3~10 секунд, пожалуйста, замените!",
"参考音频的文本": "Текст референтного аудио",
"参考音频的语种": "Язык референтного аудио",
"合成语音": "Синтезированный голос",
"合格的文件夹路径格式举例: E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华测试样例(去文件管理器地址栏拷就行了)。": "Пример допустимого формата пути к папке: E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华测试样例 (просто скопируйте из адресной строки файлового менеджера).",
"后续将支持转音素、手工修改音素、语音合成分步执行。": "В будущем будет поддерживаться преобразование в фонемы, ручная коррекция фонем, пошаговая синтезация речи.",
"填切割后音频所在目录!读取的音频文件完整路径=该目录-拼接-list文件里波形对应的文件名(不是全路径)。如果留空则使用.list文件里的绝对全路径。": "Заполните каталог, где находятся аудиофайлы после разрезания! Полный путь к читаемым аудиофайлам = каталог - файл .list, имя файла соответствует волне (не полный путь). Если оставить пустым, будет использоваться абсолютный путь из файла .list.",
"多语种混合": "Смешанные языки",
"实际输入的参考文本:": "Фактически введенный референсный текст:",
"实际输入的目标文本(切句后):": "Фактически введенный целевой текст (после разбиения на предложения):",
"实际输入的目标文本(每句):": "Фактически введенный целевой текст (каждое предложение):",
"实际输入的目标文本:": "Фактически введенный целевой текст:",
"导出文件格式": "Формат выходных файлов",
"开启GPT训练": "Включить обучение GPT",
"开启SSL提取": "Включить извлечение SSL",
"开启SoVITS训练": "Включить обучение SoVITS",
"开启一键三连": "Включить одно нажатие",
"开启文本获取": "Включить получение текста",
"开启无参考文本模式。不填参考文本亦相当于开启。": "Включить режим без референтного текста. Не заполняя референтный текст, вы также включаете этот режим.",
"开启离线批量ASR": "Включить пакетную офлайн ASR",
"开启语义token提取": "Включить извлечение семантических токенов",
"开启语音切割": "Включить разрезание речи",
"开启语音降噪": "Включить шумоподавление",
"怎么切": "Как разрезать",
"总训练轮数total_epoch": "Общее количество эпох обучения total_epoch",
"总训练轮数total_epoch,不建议太高": "Общее количество эпох обучения total_epoch, не рекомендуется слишком высокое",
"打标工具WebUI已关闭": "WebUI инструмента маркировки остановлен",
"打标工具WebUI已开启": "WebUI инструмента маркировки запущен",
"打标工具进程输出信息": "Информация о процессе аннотации",
"指定输出主人声文件夹": "Путь к папке для сохранения вокала:",
"指定输出非主人声文件夹": "Путь к папке для сохранения аккомпанемента:",
"按中文句号。切": "Разделение по китайским точкам.",
"按标点符号切": "Разрезать по пунктуационным знакам",
"按英文句号.切": "Разрезать по английской точке.",
"文本切分工具。太长的文本合成出来效果不一定好,所以太长建议先切。合成会根据文本的换行分开合成再拼起来。": "Инструмент для разделения текста. Слишком длинные тексты могут не давать хороших результатов синтеза, поэтому рекомендуется сначала их разделить. Синтез будет выполняться отдельно для каждого абзаца, а затем результаты будут соединены вместе.",
"文本模块学习率权重": "Веса скорости обучения текстового модуля",
"文本进程输出信息": "Информация о процессе обработки текста",
"施工中,请静候佳音": "В разработке, ожидайте хороших новостей",
"日文": "Японский",
"日英混合": "Японский и английский",
"是否仅保存最新的ckpt文件以节省硬盘空间": "Сохранять только последние файлы ckpt для экономии места на диске?",
"是否在每次保存时间点将最终小模型保存至weights文件夹": "Сохранять финальную версию модели в папке weights на каждом этапе сохранения?",
"是否开启TTS推理WebUI": "Включить TTS инференс WebUI",
"是否开启UVR5-WebUI": "Включить UVR5-WebUI",
"是否开启dpo训练选项(实验性)": "Включить опцию тренировки dpo (экспериментально)",
"是否开启打标WebUI": "Включить интерфейс веб-аннотации",
"是否直接对上次合成结果调整语速。防止随机性。": "Следует ли непосредственно регулировать скорость речи последнего синтезированного результата, чтобы избежать случайности?",
"显卡信息": "Информация о видеокарте",
"本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. <br>如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录<b>LICENSE</b>.": "Это программное обеспечение открыто по лицензии MIT, автор не имеет никакого контроля над программным обеспечением, пользователи программного обеспечения и те, кто распространяет звуки, экспортированные программным обеспечением, несут полную ответственность. <br>Если вы не согласны с этими условиями, вы не можете использовать или ссылаться на любой код и файлы в пакете программного обеспечения. Смотрите <b>LICENSE</b> в корневом каталоге.",
"模型": "Модели",
"模型分为三类:": "Модели делятся на три типа:",
"模型切换": "Переключение модели",
"每张显卡的batch_size": "Размер пакета для каждой видеокарты",
"终止ASR进程": "Прекратить процесс ASR",
"终止GPT训练": "Прекратить обучение GPT",
"终止SSL提取进程": "Прекратить процесс извлечения SSL",
"终止SoVITS训练": "Прекратить обучение SoVITS",
"终止一键三连": "Прекратить одно нажатие",
"终止文本获取进程": "Прекратить процесс получения текста",
"终止语义token提取进程": "Прекратить процесс извлечения семантических токенов",
"终止语音切割": "Прекратить разрезание речи",
"终止语音降噪进程": "Прекратить процесс шумоподавления",
"英文": "Английский",
"语义token提取进程输出信息": "Информация о процессе извлечения семантических токенов",
"语速": "Скорость речи",
"语速调整,高为更快": "Регулировка скорости речи, чем выше, тем быстрее",
"语音切割进程输出信息": "Информация о процессе разрезания речи",
"语音降噪进程输出信息": "Информация о процессе шумоподавления",
"请上传3~10秒内参考音频,超过会报错!": "Пожалуйста, загрузите референтное аудио длительностью от 3 до 10 секунд, иначе будет ошибка!",
"请输入有效文本": "Введите действительный текст",
"转换": "Преобразовать",
"输入待处理音频文件夹路径": "Путь к папке с аудиофайлами для обработки:",
"输入文件夹路径": "Введите путь к папке",
"输出logs/实验名目录下应有23456开头的文件和文件夹": "В директории logs/имя_эксперимента должны быть файлы и папки, начинающиеся с 23456",
"输出信息": "Статистика",
"输出文件夹路径": "Путь к папке для вывода",
"输出的语音": "Выводимый звук",
"选择训练完存放在SoVITS_weights和GPT_weights下的模型。默认的一个是底模,体验5秒Zero Shot TTS用。": "Выберите модель, сохраненную в SoVITS_weights и GPT_weights после обучения. По умолчанию используется базовая модель для 5-секундного Zero Shot TTS.",
"降噪结果输出文件夹": "Папка для вывода результатов шумоподавления",
"降噪音频文件输入文件夹": "Папка для ввода аудиофайлов для шумоподавления",
"需要合成的切分前文本": "Текст для синтеза до разделения",
"需要合成的文本": "Текст для синтеза",
"需要合成的语种": "Язык для синтеза",
"音频自动切分输入路径,可文件可文件夹": "Путь ввода для автоматического разделения аудио, может быть файлом или папкой",
"预训练的GPT模型路径": "Путь к предварительно обученной модели GPT",
"预训练的SSL模型路径": "Путь к предварительно обученной модели SSL",
"预训练的SoVITS-D模型路径": "Путь к предварительно обученной модели SoVITS-D",
"预训练的SoVITS-G模型路径": "Путь к предварительно обученной модели SoVITS-G",
"预训练的中文BERT模型路径": "Путь к предварительно обученной китайской модели BERT"
}
tools/i18n/locale/tr_TR.json ADDED
@@ -0,0 +1,172 @@
{
"(1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;": "(1)MDX-Net(onnx_dereverb):İki kanallı yankılar için en iyi seçimdir, ancak tek kanallı yankıları ortadan kaldıramaz;",
"(234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。": "(234)DeEcho:Gecikme etkilerini giderir. Aggressive, Normal'dan daha kapsamlı bir şekilde giderir, DeReverb ek olarak yankıyı giderir, tek kanallı yankıyı giderebilir, ancak yüksek frekanslı plaka yankısını tamamen gideremez.",
"*GPT模型列表": "*GPT model listesi",
"*SoVITS模型列表": "*SoVITS model listesi",
"*实验/模型名": "*Deney/model adı",
"*文本标注文件": "*Metin etiketleme dosyası",
"*训练集音频文件目录": "*Eğitim seti ses dosyası dizini",
"*请上传并填写参考信息": "*Lütfen referans bilgilerini yükleyin ve doldurun",
"*请填写需要合成的目标文本和语种模式": "*Lütfen sentezlenecek hedef metni ve dil modunu giriniz.",
".list标注文件的路径": ".list etiketleme dosyasının yolu",
"0-前置数据集获取工具": "0-Ön veri seti alma aracı",
"0a-UVR5人声伴奏分离&去混响去延迟工具": "0a-UVR5 vokal eşlik ayırma & yankıyı giderme gecikme aracı",
"0b-语音切分工具": "0b-Ses bölme aracı",
"0bb-语音降噪工具": "0bb-Ses gürültü azaltma aracı",
"0c-中文批量离线ASR工具": "0c-Çince toplu offline ASR aracı",
"0d-语音文本校对标注工具": "0d-Ses ve metin düzeltme etiketleme aracı",
"1-GPT-SoVITS-TTS": "1-GPT-SoVITS-TTS",
"1A-训练集格式化工具": "1A-Eğitim seti formatlama aracı",
"1Aa-文本内容": "1Aa-Metin içeriği",
"1Aabc-训练集格式化一键三连": "1Aabc-Eğitim seti formatlama tek tuşla üçleme",
"1Ab-SSL自监督特征提取": "1Ab-SSL kendi kendine denetimli özellik çıkarma",
"1Ac-语义token提取": "1Ac-Anlamsal token çıkarma",
"1B-微调训练": "1B-Fine-tuning eğitimi",
"1Ba-SoVITS训练。用于分享的模型文件输出在SoVITS_weights下。": "1Ba-SoVITS eğitimi. Paylaşım için model dosyaları SoVITS_weights altında çıkarılır.",
"1Bb-GPT训练。用于分享的模型文件输出在GPT_weights下。": "1Bb-GPT eğitimi. Paylaşım için model dosyaları GPT_weights altında çıkarılır.",
"1C-推理": "1C-Çıkarım",
"1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;": "1. DeEcho-DeReverb modelinin işleme süresi, diğer iki DeEcho modelinin neredeyse iki katıdır;",
"1、保留人声:不带和声的音频选这个,对主人声保留比HP5更好。内置HP2和HP3两个模型,HP3可能轻微漏伴奏但对主人声保留比HP2稍微好一丁点;": "1. Ses koruma: Arka vokal içermeyen sesler için bu seçeneği kullanın, ana sesi HP5'ten daha iyi korur. HP2 ve HP3 adlı iki model içerir; HP3, arka vokali biraz kaçırabilir ancak ana sesi HP2'ye göre biraz daha iyi korur;",
"2-GPT-SoVITS-变声": "2-GPT-SoVITS-Ses Değiştirme",
"2、MDX-Net-Dereverb模型挺慢的;": "2. MDX-Net-Dereverb modeli oldukça yavaştır;",
"2、仅保留主人声:带和声的音频选这个,对主人声可能有削弱。内置HP5一个模型;": "2. Sadece ana sesi koruma: Arka vokalleri içeren sesler için bu seçeneği kullanın, ana sesi zayıflatabilir. İçinde HP5 modeli var;",
"3、个人推荐的最干净的配置是先MDX-Net再DeEcho-Aggressive。": "3. Kişisel olarak en temiz konfigürasyon MDX-Net'in ardından DeEcho-Aggressive'dir.",
"3、去混响、去延迟模型(by FoxJoy):": "3. Yankı ve gecikme giderme modeli (FoxJoy tarafından):",
"ASR 模型": "ASR modeli",
"ASR 模型尺寸": "ASR model boyutu",
"数据类型精度": "veri türü doğruluğu",
"ASR 语言设置": "ASR dil ayarları",
"ASR进程输出信息": "ASR işlemi çıktı bilgisi",
"GPT模型列表": "GPT model listesi",
"GPT训练进程输出信息": "GPT eğitimi işlemi çıktı bilgisi",
"GPU卡号,只能填1个整数": "GPU kart numarası, sadece bir tamsayı girilebilir",
"GPU卡号以-分割,每个卡号一个进程": "GPU kart numaraları - ile ayrılır, her kart numarası için bir işlem",
"SSL进程输出信息": "SSL işlemi çıktı bilgisi",
"SoVITS模型列表": "SoVITS model listesi",
"SoVITS训练进程输出信息": "SoVITS eğitimi işlemi çıktı bilgisi",
"TTS推理WebUI进程输出信息": "TTS çıkarımı WebUI işlemi çıktı bilgisi",
"TTS推理进程已关闭": "TTS çıkarım işlemi kapatıldı",
"TTS推理进程已开启": "TTS çıkarım işlemi başlatıldı",
"UVR5已关闭": "UVR5 kapandı",
"UVR5已开启": "UVR5 açıldı",
"UVR5进程输出信息": "UVR5 işlem çıktı bilgisi",
"alpha_mix:混多少比例归一化后音频进来": "alpha_mix:Normalizasyondan sonraki sesin ne kadarlık bir oranı karıştırılsın",
"gpt采样参数(无参考文本时不要太低。不懂就用默认):": "GPT örnekleme parametreleri (referans metin olmadığında çok düşük olmamalıdır. Emin değilseniz varsayılanı kullanın):",
"hop_size:怎么算音量曲线,越小精度越大计算量越高(不是精度越大效果越好)": "hop_size:Ses seviyesi eğrisi nasıl hesaplanır, ne kadar küçükse hassasiyet o kadar yüksek ve hesaplama yükü o kadar artar (hassasiyet arttıkça etki mutlaka daha iyi olmaz)",
"max:归一化后最大值多少": "max:Normalizasyondan sonra maksimum değer ne kadar",
"max_sil_kept:切完后静音最多留多长": "max_sil_kept:Kesimden sonra en fazla ne kadar sessizlik bırakılır",
"min_interval:最短切割间隔": "min_interval:Minimum kesim aralığı",
"min_length:每段最小多长,如果第一段太短一直和后面段连起来直到超过这个值": "min_length: bölümün minimum uzunluğu, ilk bölüm çok kısa ise, bu değeri aşana kadar sonraki bölümlerle birleştirilir",
"temperature": "temperature",
"threshold:音量小于这个值视作静音的备选切割点": "threshold:Ses bu değerden düşükse sessiz olarak kabul edilen alternatif kesim noktası",
"top_k": "top_k",
"top_p": "top_p",
"一键三连进程输出信息": "Tek tuşla üçleme işlemi çıktı bilgisi",
"不切": "Kesme",
"中文": "Çince",
"中文教程文档:https://www.yuque.com/baicaigongchang1145haoyuangong/ib3g1e": "Çince öğretici belge:https://www.yuque.com/baicaigongchang1145haoyuangong/ib3g1e",
"中英混合": "Çince ve İngilizce karışık",
"也可批量输入音频文件, 二选一, 优先读文件夹": "Ses dosyaları ayrıca toplu olarak, iki seçimle, öncelikli okuma klasörüyle içe aktarılabilir",
"人声伴奏分离批量处理, 使用UVR5模型。": "Vokal ve akor ayırma toplu işleme, UVR5 modelini kullanarak.",
"人声提取激进程度": "Vokal çıkarma agresiflik derecesi",
"伴奏人声分离&去混响&去回声": "Vokal/Müzik Ayrıştırma ve Yankı Giderme",
"使用无参考文本模式时建议使用微调的GPT,听不清参考音频说的啥(不晓得写啥)可以开,开启后无视填写的参考文本。": "Referans metin modu olmadan kullanıldığında, referans sesi net duyulmadığında (ne yazılacağı bilinmiyorsa) açık bırakılması önerilir, bu durumda girilen referans metni göz ardı edilir.",
"保存频率save_every_epoch": "Kayıt sıklığı save_every_epoch",
"凑50字一切": "50 kelime birleştir ve kes",
"凑四句一切": "Dört cümleyi bir araya getirip kes",
"切分后文本": "Bölündükten sonra metin",
"切分后的子音频的输出根目录": "Bölündükten sonra alt ses dosyalarının çıktı kök dizini",
"切割使用的进程数": "Kesim için kullanılan işlem sayısı",
"刷新模型路径": "Model yolu yenile",
"前端处理后的文本(每句):": "Ön işleme tabi tutulan metin (her cümle):",
"去混响/去延迟,附:": "Yankı giderme/Geçikme giderme, ek:",
"参考音频在3~10秒范围外,请更换!": "Referans ses dosyası 3~10 saniye aralığının dışında, lütfen değiştirin!",
"参考音频的文本": "Referans ses dosyasının metni",
"参考音频的语种": "Referans ses dosyasının dili",
"合成语音": "Ses sentezi",
"合格的文件夹路径格式举例: E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华测试样例(去文件管理器地址栏拷就行了)。": "Geçerli klasör yolu formatı örneği: E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华测试样例 (dosya yöneticisi adres çubuğundan kopyalayabilirsiniz).",
"后续将支持转音素、手工修改音素、语音合成分步执行。": "İlerleyen zamanlarda fonem dönüştürme, manuel fonem düzenleme ve adım adım ses sentezi desteklenecek.",
"填切割后音频所在目录!读取的音频文件完整路径=该目录-拼接-list文件里波形对应的文件名(不是全路径)。如果留空则使用.list文件里的绝对全路径。": "Kesmeye uygun ses dosyalarının bulunduğu dizini doldurun! Okunan ses dosyasının tam yolu = bu dizin + list dosyasındaki dalga biçimiyle eşleşen dosya adı (tam yol değil). Boş bırakılırsa, .list dosyasındaki tam yol kullanılır.",
"多语种混合": "Çok dilli karışım",
"实际输入的参考文本:": "Gerçekten girilen referans metin:",
"实际输入的目标文本(切句后):": "Gerçekten girilen hedef metin (cümleler kesildikten sonra):",
"实际输入的目标文本(每句):": "Gerçekten girilen hedef metin (her cümle):",
"实际输入的目标文本:": "Gerçekten girilen hedef metin:",
"导出文件格式": "Dışa aktarma dosya formatı",
"开启GPT训练": "GPT eğitimini başlat",
"开启SSL提取": "SSL çıkarmayı başlat",
"开启SoVITS训练": "SoVITS eğitimini başlat",
"开启一键三连": "Tek tuşla üçlemeyi başlat",
"开启文本获取": "Metin alma başlat",
"开启无参考文本模式。不填参考文本亦相当于开启。": "Referans metni olmayan mod açık. Referans metni doldurulmazsa bu mod otomatik olarak açılır.",
"开启离线批量ASR": "Offline toplu ASR başlat",
"开启语义token提取": "Anlamsal token çıkarmayı başlat",
"开启语音切割": "Ses kesimi başlat",
"开启语音降噪": "Ses gürültü azaltmayı başlat",
"怎么切": "Nasıl kesilir",
"总训练轮数total_epoch": "Toplam eğitim turu sayısı total_epoch",
"总训练轮数total_epoch,不建议太高": "Toplam eğitim turu sayısı total_epoch, çok yüksek önerilmez",
"打标工具WebUI已关闭": "Etiketleme aracı WebUI'si kapatıldı",
"打标工具WebUI已开启": "Etiketleme aracı WebUI'si açıldı",
"打标工具进程输出信息": "Etiketleme aracı işlemi çıktı bilgisi",
"指定输出主人声文件夹": "Vokal için çıkış klasörünü belirtin:",
"指定输出非主人声文件夹": "Müzik ve diğer sesler için çıkış klasörünü belirtin:",
"按中文句号。切": "Çince dönem işaretine göre kes",
"按标点符号切": "Noktalama işaretlerine göre kes",
"按英文句号.切": "İngilizce nokta işaretine göre kes",
"文本切分工具。太长的文本合成出来效果不一定好,所以太长建议先切。合成会根据文本的换行分开合成再拼起来。": "Metin bölme aracı. Çok uzun metinlerin sentezi her zaman iyi sonuçlar vermez, bu yüzden önerilen önce kesmektir. Sentez, metnin yeni satırlarına göre ayrı ayrı yapılır ve daha sonra birleştirilir.",
"文本模块学习率权重": "Metin modülü öğrenme oranı ağırlığı",
"文本进程输出信息": "Metin işlemi çıktı bilgisi",
"施工中,请静候佳音": "Yapım aşamasında, lütfen iyi haberler için bekleyin",
"日文": "Japonca",
"日英混合": "Japonca ve İngilizce karışımı",
"是否仅保存最新的ckpt文件以节省硬盘空间": "Sadece en yeni ckpt dosyasını kaydederek disk alanından tasarruf edilsin mi",
"是否在每次保存时间点将最终小模型保存至weights文件夹": "Her kayıt zamanında son küçük modelin weights klasörüne kaydedilmesi gerekiyor mu",
"是否开启TTS推理WebUI": "TTS çıkarımı WebUI'si başlatılsın mı",
"是否开启UVR5-WebUI": "UVR5-WebUI açılsın mı",
"是否开启dpo训练选项(实验性)": "dpo eğitim seçeneği açılsın mı? (deneysel)",
"是否开启打标WebUI": "Etiketleme WebUI'si başlatılsın mı",
"是否直接对上次合成结果调整语速。防止随机性。": "Son sentez sonucunun konuşma hızını doğrudan ayarlamak, rastlantısallığı önlemek için mi?",
"显卡信息": "Ekran kartı bilgisi",
"本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. <br>如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录<b>LICENSE</b>.": "Bu yazılım MIT lisansı ile açık kaynaktır, yazar yazılım üzerinde herhangi bir kontrol gücüne sahip değildir, yazılımı kullanıcılar ve yazılım tarafından üretilen sesleri yayınlayanlar tüm sorumluluğu üstlenir. <br>Eğer bu şartları kabul etmiyorsanız, yazılım paketindeki hiçbir kodu veya dosyayı kullanamaz veya atıfta bulunamazsınız. Ayrıntılar için ana dizindeki <b>LICENSE</b>'ı görün.",
"模型": "Model",
"模型分为三类:": "Modeller üç türdedir:",
"模型切换": "Model değiştirme",
"每张显卡的batch_size": "Her bir ekran kartı için batch_size",
"终止ASR进程": "ASR işlemini durdur",
"终止GPT训练": "GPT eğitimini durdur",
"终止SSL提取进程": "SSL çıkarma işlemini durdur",
"终止SoVITS训练": "SoVITS eğitimini durdur",
"终止一键三连": "Tek tuşla üçlemeyi durdur",
"终止文本获取进程": "Metin alma işlemini durdur",
"终止语义token提取进程": "Anlamsal token çıkarma işlemini durdur",
"终止语音切割": "Ses kesimini durdur",
"终止语音降噪进程": "Gürültü azaltma işlemini durdur",
"英文": "İngilizce",
"语义token提取进程输出信息": "Anlamsal token çıkarma işlemi çıktı bilgisi",
"语速": "Konuşma hızı",
"语速调整,高为更快": "Konuşma hızını ayarla, yüksek daha hızlı",
"语音切割进程输出信息": "Ses kesim işlemi çıktı bilgisi",
"语音降噪进程输出信息": "Gürültü azaltma işlemi çıktı bilgisi",
"请上传3~10秒内参考音频,超过会报错!": "Lütfen 3~10 saniye arasında bir referans ses dosyası yükleyin, aşım durumunda hata verilecektir!",
"请输入有效文本": "Geçerli metin girin",
"转换": "Dönüştür",
"输入待处理音频文件夹路径": "İşlenecek ses klasörünün yolunu girin:",
"输入文件夹路径": "Dosya klasörü yolu girin",
"输出logs/实验名目录下应有23456开头的文件和文件夹": "Çıktı logs/deney adı dizininde 23456 ile başlayan dosya ve klasörler olmalı",
"输出信息": "Çıkış bilgisi",
"输出文件夹路径": "Çıktı klasörü yolu",
"输出的语音": "Çıktı sesi",
"选择训练完存放在SoVITS_weights和GPT_weights下的模型。默认的一个是底模,体验5秒Zero Shot TTS用。": "Eğitimi tamamlanmış ve SoVITS_weights ile GPT_weights altına kaydedilmiş modeli seçin. Varsayılan bir temel modeldir, 5 saniyelik Zero Shot TTS deneyimi için kullanılır.",
"降噪结果输出文件夹": "Gürültü azaltma sonuçları çıktı klasörü",
"降噪音频文件输入文件夹": "Gürültü azaltma ses dosyaları giriş klasörü",
"需要合成的切分前文本": "Bölünmeden önce sentezlenmesi gereken metin",
"需要合成的文本": "Sentezlenmesi gereken metin",
"需要合成的语种": "Sentezlenmesi gereken dil",
"音频自动切分输入路径,可文件可文件夹": "Ses otomatik bölme giriş yolu, dosya veya klasör olabilir",
"预训练的GPT模型路径": "Ön eğitilmiş GPT model yolu",
"预训练的SSL模型路径": "Ön eğitilmiş SSL model yolu",
"预训练的SoVITS-D模型路径": "Ön eğitilmiş SoVITS-D model yolu",
"预训练的SoVITS-G模型路径": "Ön eğitilmiş SoVITS-G model yolu",
"预训练的中文BERT模型路径": "Ön eğitilmiş Çince BERT model yolu"
}
tools/i18n/locale/zh_CN.json ADDED
@@ -0,0 +1,172 @@
{
"(1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;": "(1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;",
"(234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。": "(234)DeEcho:去除延迟效果。Aggressive 比 Normal 去除得更彻底,DeReverb 额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。",
"*GPT模型列表": "*GPT模型列表",
"*SoVITS模型列表": "*SoVITS模型列表",
"*实验/模型名": "*实验/模型名",
"*文本标注文件": "*文本标注文件",
"*训练集音频文件目录": "*训练集音频文件目录",
"*请上传并填写参考信息": "*请上传并填写参考信息",
"*请填写需要合成的目标文本和语种模式": "*请填写需要合成的目标文本和语种模式",
".list标注文件的路径": ".list标注文件的路径",
"0-前置数据集获取工具": "0-前置数据集获取工具",
"0a-UVR5人声伴奏分离&去混响去延迟工具": "0a-UVR5人声伴奏分离&去混响去延迟工具",
"0b-语音切分工具": "0b-语音切分工具",
"0bb-语音降噪工具": "0bb-语音降噪工具",
"0c-中文批量离线ASR工具": "0c-中文批量离线ASR工具",
"0d-语音文本校对标注工具": "0d-语音文本校对标注工具",
"1-GPT-SoVITS-TTS": "1-GPT-SoVITS-TTS",
"1A-训练集格式化工具": "1A-训练集格式化工具",
"1Aa-文本内容": "1Aa-文本内容",
"1Aabc-训练集格式化一键三连": "1Aabc-训练集格式化一键三连",
"1Ab-SSL自监督特征提取": "1Ab-SSL自监督特征提取",
"1Ac-语义token提取": "1Ac-语义token提取",
"1B-微调训练": "1B-微调训练",
"1Ba-SoVITS训练。用于分享的模型文件输出在SoVITS_weights下。": "1Ba-SoVITS训练。用于分享的模型文件输出在SoVITS_weights下。",
"1Bb-GPT训练。用于分享的模型文件输出在GPT_weights下。": "1Bb-GPT训练。用于分享的模型文件输出在GPT_weights下。",
"1C-推理": "1C-推理",
"1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;": "1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;",
"1、保留人声:不带和声的音频选这个,对主人声保留比HP5更好。内置HP2和HP3两个模型,HP3可能轻微漏伴奏但对主人声保留比HP2稍微好一丁点;": "1、保留人声:不带和声的音频选这个,对主人声保留比HP5更好。内置HP2和HP3两个模型,HP3可能轻微漏伴奏但对主人声保留比HP2稍微好一丁点;",
"2-GPT-SoVITS-变声": "2-GPT-SoVITS-变声",
"2、MDX-Net-Dereverb模型挺慢的;": "2、MDX-Net-Dereverb模型挺慢的;",
"2、仅保留主人声:带和声的音频选这个,对主人声可能有削弱。内置HP5一个模型;": "2、仅保留主人声:带和声的音频选这个,对主人声可能有削弱。内置HP5一个模型;",
"3、个人推荐的最干净的配置是先MDX-Net再DeEcho-Aggressive。": "3、个人推荐的最干净的配置是先MDX-Net再DeEcho-Aggressive。",
"3、去混响、去延迟模型(by FoxJoy):": "3、去混响、去延迟模型(by FoxJoy):",
"ASR 模型": "ASR 模型",
"ASR 模型尺寸": "ASR 模型尺寸",
"数据类型精度": "数据类型精度",
"ASR 语言设置": "ASR 语言设置",
"ASR进程输出信息": "ASR进程输出信息",
"GPT模型列表": "GPT模型列表",
"GPT训练进程输出信息": "GPT训练进程输出信息",
"GPU卡号,只能填1个整数": "GPU卡号,只能填1个整数",
"GPU卡号以-分割,每个卡号一个进程": "GPU卡号以-分割,每个卡号一个进程",
"SSL进程输出信息": "SSL进程输出信息",
"SoVITS模型列表": "SoVITS模型列表",
"SoVITS训练进程输出信息": "SoVITS训练进程输出信息",
"TTS推理WebUI进程输出信息": "TTS推理WebUI进程输出信息",
"TTS推理进程已关闭": "TTS推理进程已关闭",
"TTS推理进程已开启": "TTS推理进程已开启",
"UVR5已关闭": "UVR5已关闭",
"UVR5已开启": "UVR5已开启",
"UVR5进程输出信息": "UVR5进程输出信息",
"alpha_mix:混多少比例归一化后音频进来": "alpha_mix:混多少比例归一化后音频进来",
"gpt采样参数(无参考文本时不要太低。不懂就用默认):": "gpt采样参数(无参考文本时不要太低。不懂就用默认):",
"hop_size:怎么算音量曲线,越小精度越大计算量越高(不是精度越大效果越好)": "hop_size:怎么算音量曲线,越小精度越大计算量越高(不是精度越大效果越好)",
"max:归一化后最大值多少": "max:归一化后最大值多少",
"max_sil_kept:切完后静音最多留多长": "max_sil_kept:切完后静音最多留多长",
"min_interval:最短切割间隔": "min_interval:最短切割间隔",
"min_length:每段最小多长,如果第一段太短一直和后面段连起来直到超过这个值": "min_length:每段最小多长,如果第一段太短一直和后面段连起来直到超过这个值",
"temperature": "temperature",
"threshold:音量小于这个值视作静音的备选切割点": "threshold:音量小于这个值视作静音的备选切割点",
"top_k": "top_k",
"top_p": "top_p",
"一键三连进程输出信息": "一键三连进程输出信息",
"不切": "不切",
"中文": "中文",
"中文教程文档:https://www.yuque.com/baicaigongchang1145haoyuangong/ib3g1e": "中文教程文档:https://www.yuque.com/baicaigongchang1145haoyuangong/ib3g1e",
"中英混合": "中英混合",
"也可批量输入音频文件, 二选一, 优先读文件夹": "也可批量输入音频文件, 二选一, 优先读文件夹",
"人声伴奏分离批量处理, 使用UVR5模型。": "人声伴奏分离批量处理, 使用UVR5模型。",
"人声提取激进程度": "人声提取激进程度",
"伴奏人声分离&去混响&去回声": "伴奏人声分离&去混响&去回声",
"使用无参考文本模式时建议使用微调的GPT,听不清参考音频说的啥(不晓得写啥)可以开,开启后无视填写的参考文本。": "使用无参考文本模式时建议使用微调的GPT,听不清参考音频说的啥(不晓得写啥)可以开,开启后无视填写的参考文本。",
"保存频率save_every_epoch": "保存频率save_every_epoch",
"凑50字一切": "凑50字一切",
"凑四句一切": "凑四句一切",
"切分后文本": "切分后文本",
"切分后的子音频的输出根目录": "切分后的子音频的输出根目录",
"切割使用的进程数": "切割使用的进程数",
"刷新模型路径": "刷新模型路径",
"前端处理后的文本(每句):": "前端处理后的文本(每句):",
"去混响/去延迟,附:": "去混响/去延迟,附:",
"参考音频在3~10秒范围外,请更换!": "参考音频在3~10秒范围外,请更换!",
"参考音频的文本": "参考音频的文本",
"参考音频的语种": "参考音频的语种",
|
86 |
+
"合成语音": "合成语音",
|
87 |
+
"合格的文件夹路径格式举例: E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华测试样例(去文件管理器地址栏拷就行了)。": "合格的文件夹路径格式举例: E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华测试样例(去文件管理器地址栏拷就行了)。",
|
88 |
+
"后续将支持转音素、手工修改音素、语音合成分步执行。": "后续将支持转音素、手工修改音素、语音合成分步执行。",
|
89 |
+
"填切割后音频所在目录!读取的音频文件完整路径=该目录-拼接-list文件里波形对应的文件名(不是全路径)。如果留空则使用.list文件里的绝对全路径。": "填切割后音频所在目录!读取的音频文件完整路径=该目录-拼接-list文件里波形对应的文件名(不是全路径)。如果留空则使用.list文件里的绝对全路径。",
|
90 |
+
"多语种混合": "多语种混合",
|
91 |
+
"实际输入的参考文本:": "实际输入的参考文本:",
|
92 |
+
"实际输入的目标文本(切句后):": "实际输入的目标文本(切句后):",
|
93 |
+
"实际输入的目标文本(每句):": "实际输入的目标文本(每句):",
|
94 |
+
"实际输入的目标文本:": "实际输入的目标文本:",
|
95 |
+
"导出文件格式": "导出文件格式",
|
96 |
+
"开启GPT训练": "开启GPT训练",
|
97 |
+
"开启SSL提取": "开启SSL提取",
|
98 |
+
"开启SoVITS训练": "开启SoVITS训练",
|
99 |
+
"开启一键三连": "开启一键三连",
|
100 |
+
"开启文本获取": "开启文本获取",
|
101 |
+
"开启无参考文本模式。不填参考文本亦相当于开启。": "开启无参考文本模式。不填参考文本亦相当于开启。",
|
102 |
+
"开启离线批量ASR": "开启离线批量ASR",
|
103 |
+
"开启语义token提取": "开启语义token提取",
|
104 |
+
"开启语音切割": "开启语音切割",
|
105 |
+
"开启语音降噪": "开启语音降噪",
|
106 |
+
"怎么切": "怎么切",
|
107 |
+
"总训练轮数total_epoch": "总训练轮数total_epoch",
|
108 |
+
"总训练轮数total_epoch,不建议太高": "总训练轮数total_epoch,不建议太高",
|
109 |
+
"打标工具WebUI已关闭": "打标工具WebUI已关闭",
|
110 |
+
"打标工具WebUI已开启": "打标工具WebUI已开启",
|
111 |
+
"打标工具进程输出信息": "打标工具进程输出信息",
|
112 |
+
"指定输出主人声文件夹": "指定输出主人声文件夹",
|
113 |
+
"指定输出非主人声文件夹": "指定输出非主人声文件夹",
|
114 |
+
"按中文句号。切": "按中文句号。切",
|
115 |
+
"按标点符号切": "按标点符号切",
|
116 |
+
"按英文句号.切": "按英文句���.切",
|
117 |
+
"文本切分工具。太长的文本合成出来效果不一定好,所以太长建议先切。合成会根据文本的换行分开合成再拼起来。": "文本切分工具。太长的文本合成出来效果不一定好,所以太长建议先切。合成会根据文本的换行分开合成再拼起来。",
|
118 |
+
"文本模块学习率权重": "文本模块学习率权重",
|
119 |
+
"文本进程输出信息": "文本进程输出信息",
|
120 |
+
"施工中,请静候佳音": "施工中,请静候佳音",
|
121 |
+
"日文": "日文",
|
122 |
+
"日英混合": "日英混合",
|
123 |
+
"是否仅保存最新的ckpt文件以节省硬盘空间": "是否仅保存最新的ckpt文件以节省硬盘空间",
|
124 |
+
"是否在每次保存时间点将最终小模型保存至weights文件夹": "是否在每次保存时间点将最终小模型保存至weights文件夹",
|
125 |
+
"是否开启TTS推理WebUI": "是否开启TTS推理WebUI",
|
126 |
+
"是否开启UVR5-WebUI": "是否开启UVR5-WebUI",
|
127 |
+
"是否开启dpo训练选项(实验性)": "是否开启dpo训练选项(实验性)",
|
128 |
+
"是否开启打标WebUI": "是否开启打标WebUI",
|
129 |
+
"是否直接对上次合成结果调整语速。防止随机性。": "是否直接对上次合成结果调整语速。防止随机性。",
|
130 |
+
"显卡信息": "显卡信息",
|
131 |
+
"本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. <br>如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录<b>LICENSE</b>.": "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. <br>如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录<b>LICENSE</b>.",
|
132 |
+
"模型": "模型",
|
133 |
+
"模型分为三类:": "模型分为三类:",
|
134 |
+
"模型切换": "模型切换",
|
135 |
+
"每张显卡的batch_size": "每张显卡的batch_size",
|
136 |
+
"终止ASR进程": "终止ASR进程",
|
137 |
+
"终止GPT训练": "终止GPT训练",
|
138 |
+
"终止SSL提取进程": "终止SSL提取进程",
|
139 |
+
"终止SoVITS训练": "终止SoVITS训练",
|
140 |
+
"终止一键三连": "终止一键三连",
|
141 |
+
"终止文本获取进程": "终止文本获取进程",
|
142 |
+
"终止语义token提取进程": "终止语义token提取进程",
|
143 |
+
"终止语音切割": "终止语音切割",
|
144 |
+
"终止语音降噪进程": "终止语音降噪进程",
|
145 |
+
"英文": "英文",
|
146 |
+
"语义token提取进程输出信息": "语义token提取进程输出信息",
|
147 |
+
"语速": "语速",
|
148 |
+
"语速调整,高为更快": "语速调整,高为更快",
|
149 |
+
"语音切割进程输出信息": "语音切割进程输出信息",
|
150 |
+
"语音降噪进程输出信息": "语音降噪进程输出信息",
|
151 |
+
"请上传3~10秒内参考音频,超过会报错!": "请上传3~10秒内参考音频,超过会报错!",
|
152 |
+
"请输入有效文本": "请输入有效文本",
|
153 |
+
"转换": "转换",
|
154 |
+
"输入待处理音频文件夹路径": "输入待处理音频文件夹路径",
|
155 |
+
"输入文件夹路径": "输入文件夹路径",
|
156 |
+
"输出logs/实验名目录下应有23456开头的文件和文件夹": "输出logs/实验名目录下应有23456开头的文件和文件夹",
|
157 |
+
"输出信息": "输出信息",
|
158 |
+
"输出文件夹路径": "输出文件夹路径",
|
159 |
+
"输出的语音": "输出的语音",
|
160 |
+
"选择训练完存放在SoVITS_weights和GPT_weights下的模型。默认的一个是底模,体验5秒Zero Shot TTS用。": "选择训练完存放在SoVITS_weights和GPT_weights下的模型。默认的一个是底模,体验5秒Zero Shot TTS用。",
|
161 |
+
"降噪结果输出文件夹": "降噪结果输出文件夹",
|
162 |
+
"降噪音频文件输入文件夹": "降噪音频文件输入文件夹",
|
163 |
+
"需要合成的切分前文本": "需要合成的切分前文本",
|
164 |
+
"需要合成的文本": "需要合成的文本",
|
165 |
+
"需要合成的语种": "需要合成的语种",
|
166 |
+
"音频自动切分输入路径,可文件可文件夹": "音频自动切分输入路径,可文件可文件夹",
|
167 |
+
"预训练的GPT模型路径": "预训练的GPT模型路径",
|
168 |
+
"预训练的SSL模型路径": "预训练的SSL模型路径",
|
169 |
+
"预训练的SoVITS-D模型路径": "预训练的SoVITS-D模型路径",
|
170 |
+
"预训练的SoVITS-G模型路径": "预训练的SoVITS-G模型路径",
|
171 |
+
"预训练的中文BERT模型路径": "预训练的中文BERT模型路径"
|
172 |
+
}
|
tools/i18n/locale/zh_HK.json
ADDED
@@ -0,0 +1,172 @@
|
1 |
+
{
|
2 |
+
"(1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;": "(1)MDX-Net(onnx_dereverb):對於雙通道混響是最佳選擇,但不能去除單通道混響;",
|
3 |
+
"(234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。": "(234)DeEcho: 去除延遲效果。Aggressive 比 Normal 去除得更徹底,DeReverb 額外去除混響,可去除單聲道混響,但對高頻重的板式混響去不乾淨。",
|
4 |
+
"*GPT模型列表": "*GPT模型列表",
|
5 |
+
"*SoVITS模型列表": "*SoVITS模型列表",
|
6 |
+
"*实验/模型名": "*實驗/模型名",
|
7 |
+
"*文本标注文件": "*文本標注文件",
|
8 |
+
"*训练集音频文件目录": "*訓練集音頻文件目錄",
|
9 |
+
"*请上传并填写参考信息": "*請上傳並填寫參考信息",
|
10 |
+
"*请填写需要合成的目标文本和语种模式": "請填寫需要合成的目標文本和語言模式",
|
11 |
+
".list标注文件的路径": ".list標註文件的路徑",
|
12 |
+
"0-前置数据集获取工具": "0-前置數據集獲取工具",
|
13 |
+
"0a-UVR5人声伴奏分离&去混响去延迟工具": "0a-UVR5人聲伴奏分離&去混響去延遲工具",
|
14 |
+
"0b-语音切分工具": "0b-語音切分工具",
|
15 |
+
"0bb-语音降噪工具": "0bb-語音降噪工具",
|
16 |
+
"0c-中文批量离线ASR工具": "0c-中文批量離線ASR工具",
|
17 |
+
"0d-语音文本校对标注工具": "0d-語音文本校對標注工具",
|
18 |
+
"1-GPT-SoVITS-TTS": "1-GPT-SoVITS-TTS",
|
19 |
+
"1A-训练集格式化工具": "1A-訓練集格式化工具",
|
20 |
+
"1Aa-文本内容": "1Aa-文本內容",
|
21 |
+
"1Aabc-训练集格式化一键三连": "1Aabc-訓練集格式化一鍵三連",
|
22 |
+
"1Ab-SSL自监督特征提取": "1Ab-SSL自監督特徵提取",
|
23 |
+
"1Ac-语义token提取": "1Ac-語義token提取",
|
24 |
+
"1B-微调训练": "1B-微調訓練",
|
25 |
+
"1Ba-SoVITS训练。用于分享的模型文件输出在SoVITS_weights下。": "1Ba-SoVITS訓練。用於分享的模型文件輸出在SoVITS_weights下。",
|
26 |
+
"1Bb-GPT训练。用于分享的模型文件输出在GPT_weights下。": "1Bb-GPT訓練。用於分享的模型文件輸出在GPT_weights下。",
|
27 |
+
"1C-推理": "1C-推理",
|
28 |
+
"1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;": "1、DeEcho-DeReverb 模型的處理時間是另外兩個 DeEcho 模型的接近兩倍;",
|
29 |
+
"1、保留人声:不带和声的音频选这个,对主人声保留比HP5更好。内置HP2和HP3两个模型,HP3可能轻微漏伴奏但对主人声保留比HP2稍微好一丁点;": "1、保留人聲:不帶和聲的音頻選這個,對主人聲保留比HP5更好。內置HP2和HP3兩個模型,HP3可能輕微漏伴奏但對主人聲保留比HP2稍微好一點點;",
|
30 |
+
"2-GPT-SoVITS-变声": "2-GPT-SoVITS-變聲",
|
31 |
+
"2、MDX-Net-Dereverb模型挺慢的;": "2、MDX-Net-Dereverb 模型的處理時間挺慢的;",
|
32 |
+
"2、仅保留主人声:带和声的音频选这个,对主人声可能有削弱。内置HP5一个模型;": "2、僅保留主人聲:帶和聲的音頻選這個,對主人聲可能有削弱。內置HP5一個模型;",
|
33 |
+
"3、个人推荐的最干净的配置是先MDX-Net再DeEcho-Aggressive。": "3、個人推薦的最乾淨的配置是先 MDX-Net 再 DeEcho-Aggressive。",
|
34 |
+
"3、去混响、去延迟模型(by FoxJoy):": "3、去混響、去延遲模型(by FoxJoy):",
|
35 |
+
"ASR 模型": "ASR 模型",
|
36 |
+
"ASR 模型尺寸": "ASR 模型尺寸",
|
37 |
+
"数据类型精度": "數據類型精度",
|
38 |
+
"ASR 语言设置": "ASR 語言設置",
|
39 |
+
"ASR进程输出信息": "ASR進程輸出信息",
|
40 |
+
"GPT模型列表": "GPT模型列表",
|
41 |
+
"GPT训练进程输出信息": "GPT訓練進程輸出信息",
|
42 |
+
"GPU卡号,只能填1个整数": "GPU卡號,只能填1個整數",
|
43 |
+
"GPU卡号以-分割,每个卡号一个进程": "GPU卡號以-分割,每個卡號一個進程",
|
44 |
+
"SSL进程输出信息": "SSL進程輸出信息",
|
45 |
+
"SoVITS模型列表": "SoVITS模型列表",
|
46 |
+
"SoVITS训练进程输出信息": "SoVITS訓練進程輸出信息",
|
47 |
+
"TTS推理WebUI进程输出信息": "TTS推理WebUI進程輸出信息",
|
48 |
+
"TTS推理进程已关闭": "TTS推理進程已關閉",
|
49 |
+
"TTS推理进程已开启": "TTS推理進程已開啟",
|
50 |
+
"UVR5已关闭": "UVR5已關閉",
|
51 |
+
"UVR5已开启": "UVR5已開啟",
|
52 |
+
"UVR5进程输出信息": "UVR5進程輸出信息",
|
53 |
+
"alpha_mix:混多少比例归一化后音频进来": "alpha_mix:混多少比例歸一化後音頻進來",
|
54 |
+
"gpt采样参数(无参考文本时不要太低。不懂就用默认):": "GPT 采样参数(无参考文本时不要太低。不懂就用默认):",
|
55 |
+
"hop_size:怎么算音量曲线,越小精度越大计算量越高(不是精度越大效果越好)": "hop_size:怎麼算音量曲線,越小精度越大計算量越高(不是精度越大效果越好)",
|
56 |
+
"max:归一化后最大值多少": "max:歸一化後最大值多少",
|
57 |
+
"max_sil_kept:切完后静音最多留多长": "max_sil_kept:切完後靜音最多留多長",
|
58 |
+
"min_interval:最短切割间隔": "min_interval:最短切割間隔",
|
59 |
+
"min_length:每段最小多长,如果第一段太短一直和后面段连起来直到超过这个值": "min_length:每段最小多長,如果第一段太短一直和後面段連起來直到超過這個值",
|
60 |
+
"temperature": "temperature",
|
61 |
+
"threshold:音量小于这个值视作静音的备选切割点": "threshold:音量小於這個值視作靜音的備選切割點",
|
62 |
+
"top_k": "top_k",
|
63 |
+
"top_p": "top_p",
|
64 |
+
"一键三连进程输出信息": "一鍵三連進程輸出信息",
|
65 |
+
"不切": "不切",
|
66 |
+
"中文": "中文",
|
67 |
+
"中文教程文档:https://www.yuque.com/baicaigongchang1145haoyuangong/ib3g1e": "中文教程文檔:https://www.yuque.com/baicaigongchang1145haoyuangong/ib3g1e",
|
68 |
+
"中英混合": "中英混合",
|
69 |
+
"也可批量输入音频文件, 二选一, 优先读文件夹": "也可批量输入音频文件, 二选一, 优先读文件夹",
|
70 |
+
"人声伴奏分离批量处理, 使用UVR5模型。": "人聲伴奏分離批量處理, 使用UVR5模型。",
|
71 |
+
"人声提取激进程度": "人聲提取激進程度",
|
72 |
+
"伴奏人声分离&去混响&去回声": "伴奏人聲分離&去混響&去回聲",
|
73 |
+
"使用无参考文本模式时建议使用微调的GPT,听不清参考音频说的啥(不晓得写啥)可以开,开启后无视填写的参考文本。": "使用無參考文本模式時建議使用微調的GPT,聽不清參考音頻說的是啥(不知道寫啥)可以開啟,開啟後無視填寫的參考文本。",
|
74 |
+
"保存频率save_every_epoch": "保存頻率save_every_epoch",
|
75 |
+
"凑50字一切": "湊50字一切",
|
76 |
+
"凑四句一切": "湊四句一切",
|
77 |
+
"切分后文本": "切分後文本",
|
78 |
+
"切分后的子音频的输出根目录": "切分後的子音頻的輸出根目錄",
|
79 |
+
"切割使用的进程数": "切割使用的進程數",
|
80 |
+
"刷新模型路径": "刷新模型路徑",
|
81 |
+
"前端处理后的文本(每句):": "前端處理後的文本(每句):",
|
82 |
+
"去混响/去延迟,附:": "去混響/去延遲,附",
|
83 |
+
"参考音频在3~10秒范围外,请更换!": "參考音頻在3~10秒範圍外,請更換!",
|
84 |
+
"参考音频的文本": "參考音頻的文本",
|
85 |
+
"参考音频的语种": "參考音頻的語種",
|
86 |
+
"合成语音": "合成語音",
|
87 |
+
"合格的文件夹路径格式举例: E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华测试样例(去文件管理器地址栏拷就行了)。": "合格的文件夾路徑格式舉例: E:\\codes\\py39\\vits_vc_gpu\\白鷺霜華測試樣例(去文件管理器地址欄拷就行了)。",
|
88 |
+
"后续将支持转音素、手工修改音素、语音合成分步执行。": "後續將支持轉音素、手工修改音素、語音合成分步執行。",
|
89 |
+
"填切割后音频所在目录!读取的音频文件完整路径=该目录-拼接-list文件里波形对应的文件名(不是全路径)。如果留空则使用.list文件里的绝对全路径。": "填切割後音頻所在目錄!讀取的音頻文件完整路徑=該目錄-拼接-list文件裡波形對應的文件名(不是全路徑)。如果留空則使用.list文件裡的絕對全路徑。",
|
90 |
+
"多语种混合": "多語種混合",
|
91 |
+
"实际输入的参考文本:": "實際輸入的參考文本:",
|
92 |
+
"实际输入的目标文本(切句后):": "實際輸入的目標文本(切句後):",
|
93 |
+
"实际输入的目标文本(每句):": "實際輸入的目標文本(每句):",
|
94 |
+
"实际输入的目标文本:": "實際輸入的目標文本:",
|
95 |
+
"导出文件格式": "導出檔格式",
|
96 |
+
"开启GPT训练": "開啟GPT訓練",
|
97 |
+
"开启SSL提取": "開啟SSL提取",
|
98 |
+
"开启SoVITS训练": "開啟SoVITS訓練",
|
99 |
+
"开启一键三连": "開啟一鍵三連",
|
100 |
+
"开启文本获取": "開啟文本獲取",
|
101 |
+
"开启无参考文本模式。不填参考文本亦相当于开启。": "開啟無參考文本模式。不填參考文本亦相當於開啟。",
|
102 |
+
"开启离线批量ASR": "開啟離線批量ASR",
|
103 |
+
"开启语义token提取": "開啟語義token提取",
|
104 |
+
"开启语音切割": "開啟語音切割",
|
105 |
+
"开启语音降噪": "開啟語音降噪",
|
106 |
+
"怎么切": "怎麼切",
|
107 |
+
"总训练轮数total_epoch": "總訓練輪數total_epoch",
|
108 |
+
"总训练轮数total_epoch,不建议太高": "總訓練輪數total_epoch,不建議太高",
|
109 |
+
"打标工具WebUI已关闭": "打標工具WebUI已關閉",
|
110 |
+
"打标工具WebUI已开启": "打標工具WebUI已開啟",
|
111 |
+
"打标工具进程输出信息": "打標工具進程輸出信息",
|
112 |
+
"指定输出主人声文件夹": "指定输出主人声文件夹",
|
113 |
+
"指定输出非主人声文件夹": "指定输出非主人声文件夹",
|
114 |
+
"按中文句号。切": "按中文句號。切",
|
115 |
+
"按标点符号切": "按標點符��切",
|
116 |
+
"按英文句号.切": "按英文句號.切",
|
117 |
+
"文本切分工具。太长的文本合成出来效果不一定好,所以太长建议先切。合成会根据文本的换行分开合成再拼起来。": "文本切分工具。太長的文本合成出來效果不一定好,所以太長建議先切。合成會根據文本的換行分開合成再拼起來。",
|
118 |
+
"文本模块学习率权重": "文本模塊學習率權重",
|
119 |
+
"文本进程输出信息": "文本進程輸出信息",
|
120 |
+
"施工中,请静候佳音": "施工中,請靜候佳音",
|
121 |
+
"日文": "日文",
|
122 |
+
"日英混合": "日英混合",
|
123 |
+
"是否仅保存最新的ckpt文件以节省硬盘空间": "是否僅保存最新的ckpt文件以節省硬碟空間",
|
124 |
+
"是否在每次保存时间点将最终小模型保存至weights文件夹": "是否在每次保存時間點將最終小模型保存至weights文件夾",
|
125 |
+
"是否开启TTS推理WebUI": "是否開啟TTS推理WebUI",
|
126 |
+
"是否开启UVR5-WebUI": "是否開啟UVR5-WebUI",
|
127 |
+
"是否开启dpo训练选项(实验性)": "是否開啟dpo訓練選項(實驗性)",
|
128 |
+
"是否开启打标WebUI": "是否開啟打標WebUI",
|
129 |
+
"是否直接对上次合成结果调整语速。防止随机性。": "是否直接對上次合成結果調整語速。防止隨機性。",
|
130 |
+
"显卡信息": "顯卡信息",
|
131 |
+
"本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. <br>如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录<b>LICENSE</b>.": "本軟件以MIT協議開源, 作者不對軟件具備任何控制力, 使用軟件者、傳播軟件導出的聲音者自負全責. <br>如不認可該條款, 則不能使用或引用軟件包內任何代碼和文件. 詳見根目錄<b>LICENSE</b>.",
|
132 |
+
"模型": "模型",
|
133 |
+
"模型分为三类:": "模型分為三類:",
|
134 |
+
"模型切换": "模型切換",
|
135 |
+
"每张显卡的batch_size": "每張顯卡的batch_size",
|
136 |
+
"终止ASR进程": "終止ASR進程",
|
137 |
+
"终止GPT训练": "終止GPT訓練",
|
138 |
+
"终止SSL提取进程": "終止SSL提取進程",
|
139 |
+
"终止SoVITS训练": "終止SoVITS訓練",
|
140 |
+
"终止一键三连": "終止一鍵三連",
|
141 |
+
"终止文本获取进程": "終止文本獲取進程",
|
142 |
+
"终止语义token提取进程": "終止語義token提取進程",
|
143 |
+
"终止语音切割": "終止語音切割",
|
144 |
+
"终止语音降噪进程": "終止語音降噪進程",
|
145 |
+
"英文": "英文",
|
146 |
+
"语义token提取进程输出信息": "語義token提取進程輸出信息",
|
147 |
+
"语速": "語速",
|
148 |
+
"语速调整,高为更快": "調整語速,高為更快",
|
149 |
+
"语音切割进程输出信息": "語音切割進程輸出信息",
|
150 |
+
"语音降噪进程输出信息": "語音降噪進程輸出信息",
|
151 |
+
"请上传3~10秒内参考音频,超过会报错!": "請上傳3~10秒內參考音頻,超過會報錯!",
|
152 |
+
"请输入有效文本": "請輸入有效文本",
|
153 |
+
"转换": "轉換",
|
154 |
+
"输入待处理音频文件夹路径": "輸入待處理音頻資料夾路徑",
|
155 |
+
"输入文件夹路径": "輸入文件夾路徑",
|
156 |
+
"输出logs/实验名目录下应有23456开头的文件和文件夹": "輸出logs/實驗名目錄下應有23456開頭的文件和文件夾",
|
157 |
+
"输出信息": "輸出訊息",
|
158 |
+
"输出文件夹路径": "輸出文件夾路徑",
|
159 |
+
"输出的语音": "輸出的語音",
|
160 |
+
"选择训练完存放在SoVITS_weights和GPT_weights下的模型。默认的一个是底模,体验5秒Zero Shot TTS用。": "選擇訓練完存放在SoVITS_weights和GPT_weights下的模型。默認的一個是底模,體驗5秒Zero Shot TTS用。",
|
161 |
+
"降噪结果输出文件夹": "降噪結果輸出文件夾",
|
162 |
+
"降噪音频文件输入文件夹": "降噪音頻文件輸入文件夾",
|
163 |
+
"需要合成的切分前文本": "需要合成的切分前文本",
|
164 |
+
"需要合成的文本": "需要合成的文本",
|
165 |
+
"需要合成的语种": "需要合成的語種",
|
166 |
+
"音频自动切分输入路径,可文件可文件夹": "音頻自動切分輸入路徑,可文件可文件夾",
|
167 |
+
"预训练的GPT模型路径": "預訓練的GPT模型路徑",
|
168 |
+
"预训练的SSL模型路径": "預訓練的SSL模型路徑",
|
169 |
+
"预训练的SoVITS-D模型路径": "預訓練的SoVITS-D模型路徑",
|
170 |
+
"预训练的SoVITS-G模型路径": "預訓練的SoVITS-G模型路徑",
|
171 |
+
"预训练的中文BERT模型路径": "預訓練的中文BERT模型路徑"
|
172 |
+
}
|
tools/i18n/locale/zh_SG.json
ADDED
@@ -0,0 +1,172 @@
|
1 |
+
{
|
2 |
+
"(1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;": "(1)MDX-Net(onnx_dereverb):對於雙通道混響是最好的選擇,不能去除單通道混響;",
|
3 |
+
"(234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。": "(234)DeEcho: Aggressive 比 Normal 去除得更徹底,DeReverb 額外去除混響,可去除單聲道混響,但是對高頻重的板式混響去不乾淨。",
|
4 |
+
"*GPT模型列表": "*GPT模型列表",
|
5 |
+
"*SoVITS模型列表": "*SoVITS模型列表",
|
6 |
+
"*实验/模型名": "*實驗/模型名",
|
7 |
+
"*文本标注文件": "*文本標註文件",
|
8 |
+
"*训练集音频文件目录": "*訓練集音頻文件目錄",
|
9 |
+
"*请上传并填写参考信息": "*請上傳並填寫參考信息",
|
10 |
+
"*请填写需要合成的目标文本和语种模式": "請填寫需要合成的目標文本和語言模式",
|
11 |
+
".list标注文件的路径": ".list標註文件的路徑",
|
12 |
+
"0-前置数据集获取工具": "0-前置數據集獲取工具",
|
13 |
+
"0a-UVR5人声伴奏分离&去混响去延迟工具": "0a-UVR5人聲伴奏分離&去混響去延遲工具",
|
14 |
+
"0b-语音切分工具": "0b-語音切分工具",
|
15 |
+
"0bb-语音降噪工具": "0bb-語音降噪工具",
|
16 |
+
"0c-中文批量离线ASR工具": "0c-中文批量離線ASR工具",
|
17 |
+
"0d-语音文本校对标注工具": "0d-語音文本校對標註工具",
|
18 |
+
"1-GPT-SoVITS-TTS": "1-GPT-SoVITS-TTS",
|
19 |
+
"1A-训练集格式化工具": "1A-訓練集格式化工具",
|
20 |
+
"1Aa-文本内容": "1Aa-文本內容",
|
21 |
+
"1Aabc-训练集格式化一键三连": "1Aabc-訓練集格式化一鍵三連",
|
22 |
+
"1Ab-SSL自监督特征提取": "1Ab-SSL自監督特徵提取",
|
23 |
+
"1Ac-语义token提取": "1Ac-語義token提取",
|
24 |
+
"1B-微调训练": "1B-微調訓練",
|
25 |
+
"1Ba-SoVITS训练。用于分享的模型文件输出在SoVITS_weights下。": "1Ba-SoVITS訓練。用於分享的模型文件輸出在SoVITS_weights下。",
|
26 |
+
"1Bb-GPT训练。用于分享的模型文件输出在GPT_weights下。": "1Bb-GPT訓練。用於分享的模型文件輸出在GPT_weights下。",
|
27 |
+
"1C-推理": "1C-推理",
|
28 |
+
"1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;": "1、DeEcho-DeReverb 模型的耗時是另外兩個 DeEcho 模型的接近兩倍;",
|
29 |
+
"1、保留人声:不带和声的音频选这个,对主人声保留比HP5更好。内置HP2和HP3两个模型,HP3可能轻微漏伴奏但对主人声保留比HP2稍微好一丁点;": "1、保留人聲:不帶和聲的音頻選這個,對主人聲保留比HP5更好。內置HP2和HP3兩個模型,HP3可能輕微漏伴奏但對主人聲保留比HP2稍微好一丁點;",
|
30 |
+
"2-GPT-SoVITS-变声": "2-GPT-SoVITS-變聲",
|
31 |
+
"2、MDX-Net-Dereverb模型挺慢的;": "2、MDX-Net-Dereverb 模型的處理時間挺慢的;",
|
32 |
+
"2、仅保留主人声:带和声的音频选这个,对主人声可能有削弱。内置HP5一个模型;": "2、僅保留主人聲:帶和聲的音頻選這個,對主人聲可能有削弱。內置HP5一個模型;",
|
33 |
+
"3、个人推荐的最干净的配置是先MDX-Net再DeEcho-Aggressive。": "3、個人推薦的最乾淨的配置是先 MDX-Net 再 DeEcho-Aggressive。",
|
34 |
+
"3、去混响、去延迟模型(by FoxJoy):": "3、去混響、去延遲模型(by FoxJoy):",
|
35 |
+
"ASR 模型": "ASR 模型",
|
36 |
+
"ASR 模型尺寸": "ASR 模型尺寸",
|
37 |
+
"数据类型精度": "數據類型精度",
|
38 |
+
"ASR 语言设置": "ASR 語言設定",
|
39 |
+
"ASR进程输出信息": "ASR進程輸出資訊",
|
40 |
+
"GPT模型列表": "GPT模型列表",
|
41 |
+
"GPT训练进程输出信息": "GPT訓練進程輸出資訊",
|
42 |
+
"GPU卡号,只能填1个整数": "GPU卡號,只能填1個整數",
|
43 |
+
"GPU卡号以-分割,每个卡号一个进程": "GPU卡號以-分割,每個卡號一個進程",
|
44 |
+
"SSL进程输出信息": "SSL進程輸出資訊",
|
45 |
+
"SoVITS模型列表": "SoVITS模型列表",
|
46 |
+
"SoVITS训练进程输出信息": "SoVITS訓練進程輸出資訊",
|
47 |
+
"TTS推理WebUI进程输出信息": "TTS推理WebUI進程輸出資訊",
|
48 |
+
"TTS推理进程已关闭": "TTS推理進程已關閉",
|
49 |
+
"TTS推理进程已开启": "TTS推理進程已開啟",
|
50 |
+
"UVR5已关闭": "UVR5已關閉",
|
51 |
+
"UVR5已开启": "UVR5已開啟",
|
52 |
+
"UVR5进程输出信息": "UVR5進程輸出資訊",
|
53 |
+
"alpha_mix:混多少比例归一化后音频进来": "alpha_mix:混多少比例歸一化後音頻進來",
|
54 |
+
"gpt采样参数(无参考文本时不要太低。不懂就用默认):": "GPT 采样参数(无参考文本时不要太低。不懂就用默认):",
|
55 |
+
"hop_size:怎么算音量曲线,越小精度越大计算量越高(不是精度越大效果越好)": "hop_size:怎麼算音量曲線,越小精度越大計算量越高(不是精度越大效果越好)",
|
56 |
+
"max:归一化后最大值多少": "max:歸一化後最大值多少",
|
57 |
+
"max_sil_kept:切完后静音最多留多长": "max_sil_kept:切完後靜音最多留多長",
|
58 |
+
"min_interval:最短切割间隔": "min_interval:最短切割間隔",
|
59 |
+
"min_length:每段最小多长,如果第一段太短一直和后面段连起来直到超过这个值": "min_length:每段最小多長,如果第一段太短一直和後面段連起來直到超過這個值",
|
60 |
+
"temperature": "temperature",
|
61 |
+
"threshold:音量小于这个值视作静音的备选切割点": "threshold:音量小於這個值視作靜音的備選切割點",
|
62 |
+
"top_k": "top_k",
|
63 |
+
"top_p": "top_p",
|
64 |
+
"一键三连进程输出信息": "一鍵三連進程輸出資訊",
|
65 |
+
"不切": "不切",
|
66 |
+
"中文": "中文",
|
67 |
+
"中文教程文档:https://www.yuque.com/baicaigongchang1145haoyuangong/ib3g1e": "中文教程文檔:https://www.yuque.com/baicaigongchang1145haoyuangong/ib3g1e",
|
68 |
+
"中英混合": "中英混合",
|
69 |
+
"也可批量输入音频文件, 二选一, 优先读文件夹": "也可批量输入音频文件, 二选一, 优先读文件夹",
|
70 |
+
"人声伴奏分离批量处理, 使用UVR5模型。": "人聲伴奏分離批量處理, 使用UVR5模型。",
|
71 |
+
"人声提取激进程度": "人聲提取激進程度",
|
72 |
+
"伴奏人声分离&去混响&去回声": "伴奏人聲分離&去混響&去回聲",
|
73 |
+
"使用无参考文本模式时建议使用微调的GPT,听不清参考音频说的啥(不晓得写啥)可以开,开启后无视填写的参考文本。": "使用無參考文本模式時建議使用微調的GPT,聽不清參考音頻說的啥(不曉得寫啥)可以開,開啟後無視填寫的參考文本。",
|
74 |
+
"保存频率save_every_epoch": "保存頻率save_every_epoch",
|
75 |
+
"凑50字一切": "湊50字一切",
|
76 |
+
"凑四句一切": "湊四句一切",
|
77 |
+
"切分后文本": "切分後文本",
|
78 |
+
"切分后的子音频的输出根目录": "切分後的子音頻的輸出根目錄",
|
79 |
+
"切割使用的进程数": "切割使用的進程數",
|
80 |
+
"刷新模型路径": "刷新模型路徑",
|
81 |
+
"前端处理后的文本(每句):": "前端處理後的文本(每句):",
|
82 |
+
"去混响/去延迟,附:": "去混響/去延遲,附:",
|
83 |
+
"参考音频在3~10秒范围外,请更换!": "參考音頻在3~10秒範圍外,請更換!",
|
84 |
+
"参考音频的文本": "參考音頻的文本",
|
85 |
+
"参考音频的语种": "參考音頻的語種",
|
86 |
+
"合成语音": "合成語音",
|
87 |
+
"合格的文件夹路径格式举例: E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华测试样例(去文件管理器地址栏拷就行了)。": "合格的資料夾路徑格式舉例: E:\\codes\\py39\\vits_vc_gpu\\白鷺霜華測試範例(去文件管理器地址欄拷就行了)。",
|
88 |
+
"后续将支持转音素、手工修改音素、语音合成分步执行。": "後續將支持轉音素、手工修改音素、語音合成分步執行。",
|
89 |
+
"填切割后音频所在目录!读取的音频文件完整路径=该目录-拼接-list文件里波形对应的文件名(不是全路径)。如果留空则使用.list文件里的绝对全路径。": "填切割後音頻所在目錄!讀取的音頻檔案完整路徑=該目錄-拼接-list檔案裡波形對應的檔案名(不是全路徑)。如果留空則使用.list檔案裡的絕對全路徑。",
|
90 |
+
"多语种混合": "多語種混合",
|
91 |
+
"实际输入的参考文本:": "實際輸入的參考文本:",
|
92 |
+
"实际输入的目标文本(切句后):": "實際輸入的目標文本(切句後):",
|
93 |
+
"实际输入的目标文本(每句):": "實際輸入的目標文本(每句):",
|
94 |
+
"实际输入的目标文本:": "實際輸入的目標文本:",
|
95 |
+
"导出文件格式": "導出檔格式",
|
96 |
+
"开启GPT训练": "開啟GPT訓練",
|
97 |
+
"开启SSL提取": "開啟SSL提取",
|
98 |
+
"开启SoVITS训练": "開啟SoVITS訓練",
|
99 |
+
"开启一键三连": "開啟一鍵三連",
|
100 |
+
"开启文本获取": "開啟文本獲取",
|
101 |
+
"开启无参考文本模式。不填参考文本亦相当于开启。": "開啟無參考文本模式。不填參考文本亦相當於開啟。",
|
102 |
+
"开启离线批量ASR": "開啟離線批量ASR",
|
103 |
+
"开启语义token提取": "開啟語義token提取",
|
104 |
+
"开启语音切割": "開啟語音切割",
|
105 |
+
"开启语音降噪": "開啟語音降噪",
|
106 |
+
"怎么切": "怎麼切",
|
107 |
+
"总训练轮数total_epoch": "總訓練輪數total_epoch",
|
108 |
+
"总训练轮数total_epoch,不建议太高": "總訓練輪數total_epoch,不建議太高",
|
109 |
+
"打标工具WebUI已关闭": "打標工具WebUI已關閉",
|
110 |
+
"打标工具WebUI已开启": "打標工具WebUI已開啟",
|
111 |
+
"打标工具进程输出信息": "打標工具進程輸出資訊",
|
112 |
+
"指定输出主人声文件夹": "指定输出主人声文件夹",
|
113 |
+
"指定输出非主人声文件夹": "指定输出非主人声文件夹",
|
114 |
+
"按中文句号。切": "按中文句號。切",
|
115 |
+
"按标点符号切": "按標點符號切",
|
116 |
+
"按英文句号.切": "按英文句號.切",
|
117 |
+
"文本切分工具。太长的文本合成出来效果不一定好,所以太长建议先切。合成会根据文本的换行分开合成再拼起来。": "文本切分工具。太長的文本合成出來效果不一定好,所以太長建議先切。合成會根據文本的換行分開合成再拼起來。",
|
118 |
+
"文本模块学习率权重": "文本模塊學習率權重",
|
119 |
+
"文本进程输出信息": "文本進程輸出資訊",
|
120 |
+
"施工中,请静候佳音": "施工中,請靜候佳音",
|
121 |
+
"日文": "日文",
|
122 |
+
"日英混合": "日英混合",
|
123 |
+
"是否仅保存最新的ckpt文件以节省硬盘空间": "是否僅保存最新的ckpt文件以節省硬盤空間",
|
124 |
+
"是否在每次保存时间点将最终小模型保存至weights文件夹": "是否在每次保存時間點將最終小模型保存至weights文件夾",
|
125 |
+
"是否开启TTS推理WebUI": "是否開啟TTS推理WebUI",
|
126 |
+
"是否开启UVR5-WebUI": "是否開啟UVR5-WebUI",
|
127 |
+
"是否开启dpo训练选项(实验性)": "是否開啟dpo訓練選項(實驗性)",
|
128 |
+
"是否开启打标WebUI": "是否開啟打標WebUI",
|
129 |
+
"是否直接对上次合成结果调整语速。防止随机性。": "是否直接對上次合成結果調整語速。防止隨機性。",
|
130 |
+
"显卡信息": "顯卡資訊",
|
131 |
+
"本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. <br>如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录<b>LICENSE</b>.": "本軟體以MIT協議開源,作者不對軟體具備任何控制力,使用軟體者、傳播軟體導出的聲音者自負全責。<br>如不認可該條款,則不能使用或引用軟體包內任何代碼和文件。詳見根目錄<b>LICENSE</b>。",
|
132 |
+
"模型": "模型",
|
133 |
+
"模型分为三类:": "模型分為三類:",
|
134 |
+
"模型切换": "模型切換",
|
135 |
+
"每张显卡的batch_size": "每張顯卡的batch_size",
|
136 |
+
"终止ASR进程": "終止ASR進程",
|
137 |
+
"终止GPT训练": "終止GPT訓練",
|
138 |
+
"终止SSL提取进程": "終止SSL提取進程",
|
139 |
+
"终止SoVITS训练": "終止SoVITS訓練",
|
140 |
+
"终止一键三连": "終止一鍵三連",
|
141 |
+
"终止文本获取进程": "終止文本獲取進程",
|
142 |
+
"终止语义token提取进程": "終止語義token提取進程",
|
143 |
+
"终止语音切割": "終止語音切割",
|
144 |
+
"终止语音降噪进程": "終止語音降噪進程",
|
145 |
+
"英文": "英文",
|
146 |
+
"语义token提取进程输出信息": "語義token提取進程輸出資訊",
|
147 |
+
"语速": "語速",
|
148 |
+
"语速调整,高为更快": "調整語速,高為更快",
|
149 |
+
"语音切割进程输出信息": "語音切割進程輸出資訊",
|
150 |
+
"语音降噪进程输出信息": "語音降噪進程輸出資訊",
|
151 |
+
"请上传3~10秒内参考音频,超过会报错!": "請上傳3~10秒內參考音頻,超過會報錯!",
|
152 |
+
"请输入有效文本": "請輸入有效文本",
|
153 |
+
"转换": "轉換",
|
154 |
+
"输入待处理音频文件夹路径": "輸入待處理音頻資料夾路徑",
|
155 |
+
"输入文件夹路径": "輸入文件夾路徑",
|
156 |
+
"输出logs/实验名目录下应有23456开头的文件和文件夹": "輸出logs/實驗名目錄下應有23456開頭的文件和文件夾",
|
157 |
+
"输出信息": "輸出訊息",
|
158 |
+
"输出文件夹路径": "輸出文件夾路徑",
|
159 |
+
"输出的语音": "輸出的語音",
|
160 |
+
"选择训练完存放在SoVITS_weights和GPT_weights下的模型。默认的一个是底模,体验5秒Zero Shot TTS用。": "選擇訓練完存放在SoVITS_weights和GPT_weights下的模型。默認的一個是底模,體驗5秒Zero Shot TTS用。",
|
161 |
+
"降噪结果输出文件夹": "降噪結果輸出文件夾",
|
162 |
+
"降噪音频文件输入文件夹": "降噪音頻文件輸入文件夾",
|
163 |
+
"需要合成的切分前文本": "需要合成的切分前文本",
|
164 |
+
"需要合成的文本": "需要合成的文本",
|
165 |
+
"需要合成的语种": "需要合成的語種",
|
166 |
+
"音频自动切分输入路径,可文件可文件夹": "音頻自動切分輸入路徑,可文件可文件夾",
|
167 |
+
"预训练的GPT模型路径": "預訓練的GPT模型路徑",
|
168 |
+
"预训练的SSL模型路径": "預訓練的SSL模型路徑",
|
169 |
+
"预训练的SoVITS-D模型路径": "預訓練的SoVITS-D模型路徑",
|
170 |
+
"预训练的SoVITS-G模型路径": "預訓練的SoVITS-G模型路徑",
|
171 |
+
"预训练的中文BERT模型路径": "預訓練的中文BERT模型路徑"
|
172 |
+
}
|
tools/i18n/locale/zh_TW.json
ADDED
@@ -0,0 +1,172 @@
|
1 |
+
{
|
2 |
+
"(1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;": "(1)MDX-Net(onnx_dereverb):對於雙通道混響是最好的選擇,不能去除單通道混響;",
|
3 |
+
"(234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。": "(234)DeEcho:去除延遲效果。Aggressive 比 Normal 去除得更徹底,DeReverb 額外去除混響,可去除單聲道混響,但是對高頻重的板式混響去不乾淨。",
|
4 |
+
"*GPT模型列表": "*GPT模型列表",
|
5 |
+
"*SoVITS模型列表": "*SoVITS模型列表",
|
6 |
+
"*实验/模型名": "*實驗/模型名",
|
7 |
+
"*文本标注文件": "*文本標注文件",
|
8 |
+
"*训练集音频文件目录": "*訓練集音頻文件目錄",
|
9 |
+
"*请上传并填写参考信息": "*請上傳並填寫參考資訊",
|
10 |
+
"*请填写需要合成的目标文本和语种模式": "請填寫需要合成的目標文本和語言模式",
|
11 |
+
".list标注文件的路径": ".list標注文件的路徑",
|
12 |
+
"0-前置数据集获取工具": "0-前置數據集獲取工具",
|
13 |
+
"0a-UVR5人声伴奏分离&去混响去延迟工具": "0a-UVR5人聲伴奏分離&去混響去延遲工具",
|
14 |
+
"0b-语音切分工具": "0b-語音切分工具",
|
15 |
+
"0bb-语音降噪工具": "0bb-語音降噪工具",
|
16 |
+
"0c-中文批量离线ASR工具": "0c-中文批量離線ASR工具",
|
17 |
+
"0d-语音文本校对标注工具": "0d-語音文本校對標注工具",
|
18 |
+
"1-GPT-SoVITS-TTS": "1-GPT-SoVITS-TTS",
|
19 |
+
"1A-训练集格式化工具": "1A-訓練集格式化工具",
|
20 |
+
"1Aa-文本内容": "1Aa-文本內容",
|
21 |
+
"1Aabc-训练集格式化一键三连": "1Aabc-訓練集格式化一鍵三連",
|
22 |
+
"1Ab-SSL自监督特征提取": "1Ab-SSL自監督特徵提取",
|
23 |
+
"1Ac-语义token提取": "1Ac-語義token提取",
|
24 |
+
"1B-微调训练": "1B-微調訓練",
|
25 |
+
"1Ba-SoVITS训练。用于分享的模型文件输出在SoVITS_weights下。": "1Ba-SoVITS訓練。用於分享的模型文件輸出在SoVITS_weights下。",
|
26 |
+
"1Bb-GPT训练。用于分享的模型文件输出在GPT_weights下。": "1Bb-GPT訓練。用於分享的模型文件輸出在GPT_weights下。",
|
27 |
+
"1C-推理": "1C-推理",
|
28 |
+
"1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;": "1、DeEcho-DeReverb 模型的耗時是另外兩個 DeEcho 模型的接近兩倍;",
|
29 |
+
"1、保留人声:不带和声的音频选这个,对主人声保留比HP5更好。内置HP2和HP3两个模型,HP3可能轻微漏伴奏但对主人声保留比HP2稍微好一丁点;": "1、保留人聲:不帶和聲的音頻選這個,對主人聲保留比HP5更好。內置HP2和HP3兩個模型,HP3可能輕微漏伴奏但對主人聲保留比HP2稍微好一丁點;",
|
30 |
+
"2-GPT-SoVITS-变声": "2-GPT-SoVITS-變聲",
|
31 |
+
"2、MDX-Net-Dereverb模型挺慢的;": "2、MDX-Net-Dereverb模型挺慢的;",
|
32 |
+
"2、仅保留主人声:带和声的音频选这个,对主人声可能有削弱。内置HP5一个模型;": "2、僅保留主人聲:帶和聲的音頻選這個,對主人聲可能有削弱。內置HP5一個模型;",
|
33 |
+
"3、个人推荐的最干净的配置是先MDX-Net再DeEcho-Aggressive。": "3、個人推薦的最乾淨的配置是先 MDX-Net 再 DeEcho-Aggressive。",
|
34 |
+
"3、去混响、去延迟模型(by FoxJoy):": "3、去混響、去延遲模型(by FoxJoy):",
|
35 |
+
"ASR 模型": "ASR 模型",
|
36 |
+
"ASR 模型尺寸": "ASR 模型尺寸",
|
37 |
+
"数据类型精度": "數據類型精度",
|
38 |
+
"ASR 语言设置": "ASR 語言設置",
|
39 |
+
"ASR进程输出信息": "ASR進程輸出資訊",
|
40 |
+
"GPT模型列表": "GPT模型列表",
|
41 |
+
"GPT训练进程输出信息": "GPT訓練進程輸出資訊",
|
42 |
+
"GPU卡号,只能填1个整数": "GPU卡號,只能填1個整數",
|
43 |
+
"GPU卡号以-分割,每个卡号一个进程": "GPU卡號以-分割,每個卡號一個進程",
|
44 |
+
"SSL进程输出信息": "SSL進程輸出資訊",
|
45 |
+
"SoVITS模型列表": "SoVITS模型列表",
|
46 |
+
"SoVITS训练进程输出信息": "SoVITS訓練進程輸出資訊",
|
47 |
+
"TTS推理WebUI进程输出信息": "TTS推理WebUI進程輸出資訊",
|
48 |
+
"TTS推理进程已关闭": "TTS推理進程已關閉",
|
49 |
+
"TTS推理进程已开启": "TTS推理進程已開啟",
|
50 |
+
"UVR5已关闭": "UVR5已關閉",
|
51 |
+
"UVR5已开启": "UVR5已開啟",
|
52 |
+
"UVR5进程输出信息": "UVR5進程輸出資訊",
|
53 |
+
"alpha_mix:混多少比例归一化后音频进来": "alpha_mix:混多少比例歸一化後音頻進來",
|
54 |
+
"gpt采样参数(无参考文本时不要太低。不懂就用默认):": "GPT 采样参数(无参考文本时不要太低。不懂就用默认):",
|
55 |
+
"hop_size:怎么算音量曲线,越小精度越大计算量越高(不是精度越大效果越好)": "hop_size:怎麼算音量曲線,越小精度越大計算量越高(不是精度越大效果越好)",
|
56 |
+
"max:归一化后最���值多少": "max:歸一化後最大值多少",
|
57 |
+
"max_sil_kept:切完后静音最多留多长": "max_sil_kept:切完後靜音最多留多長",
|
58 |
+
"min_interval:最短切割间隔": "min_interval:最短切割間隔",
|
59 |
+
"min_length:每段最小多长,如果第一段太短一直和后面段连起来直到超过这个值": "min_length:每段最小多長,如果第一段太短一直和後面段連起來直到超過這個值",
|
60 |
+
"temperature": "temperature",
|
61 |
+
"threshold:音量小于这个值视作静音的备选切割点": "threshold:音量小於這個值視作靜音的備選切割點",
|
62 |
+
"top_k": "top_k",
|
63 |
+
"top_p": "top_p",
|
64 |
+
"一键三连进程输出信息": "一鍵三連進程輸出資訊",
|
65 |
+
"不切": "不切",
|
66 |
+
"中文": "中文",
|
67 |
+
"中文教程文档:https://www.yuque.com/baicaigongchang1145haoyuangong/ib3g1e": "中文教程文檔:https://www.yuque.com/baicaigongchang1145haoyuangong/ib3g1e",
|
68 |
+
"中英混合": "中英混合",
|
69 |
+
"也可批量输入音频文件, 二选一, 优先读文件夹": "也可批量输入音频文件, 二选一, 优先读文件夹",
|
70 |
+
"人声伴奏分离批量处理, 使用UVR5模型。": "人聲伴奏分離批量處理, 使用UVR5模型。",
|
71 |
+
"人声提取激进程度": "人聲提取激進程度",
|
72 |
+
"伴奏人声分离&去混响&去回声": "伴奏人聲分離&去混響&去回聲",
|
73 |
+
"使用无参考文本模式时建议使用微调的GPT,听不清参考音频说的啥(不晓得写啥)可以开,开启后无视填写的参考文本。": "使用無參考文本模式時建議使用微調的GPT,聽不清參考音頻說的啥(不曉得寫啥)可以開,開啟後無視填寫的參考文本。",
|
74 |
+
"保存频率save_every_epoch": "保存頻率save_every_epoch",
|
75 |
+
"凑50字一切": "湊50字一切",
|
76 |
+
"凑四句一切": "湊四句一切",
|
77 |
+
"切分后文本": "切分後文本",
|
78 |
+
"切分后的子音频的输出根目录": "切分後的子音頻的輸出根目錄",
|
79 |
+
"切割使用的进程数": "切割使用的進程數",
|
80 |
+
"刷新模型路径": "刷新模型路徑",
|
81 |
+
"前端处理后的文本(每句):": "前端處理後的文本(每句):",
|
82 |
+
"去混响/去延迟,附:": "去混響/去延遲,附:",
|
83 |
+
"参考音频在3~10秒范围外,请更换!": "參考音頻在3~10秒範圍外,請更換!",
|
84 |
+
"参考音频的文本": "參考音頻的文本",
|
85 |
+
"参考音频的语种": "參考音頻的語種",
|
86 |
+
"合成语音": "合成語音",
|
87 |
+
"合格的文件夹路径格式举例: E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华测试样例(去文件管理器地址栏拷就行了)。": "合格的資料夾路徑格式舉例: E:\\codes\\py39\\vits_vc_gpu\\白鷺霜華測試範例(去文件管理器地址欄拷就行了)。",
|
88 |
+
"后续将支持转音素、手工修改音素、语音合成分步执行。": "後續將支持轉音素、手工修改音素、語音合成分步執行。",
|
89 |
+
"填切割后音频所在目录!读取的音频文件完整路径=该目录-拼接-list文件里波形对应的文件名(不是全路径)。如果留空则使用.list文件里的绝对全路径。": "填切割後音頻所在目錄!讀取的音頻檔案完整路徑=該目錄-拼接-list檔案裡波形對應的檔案名(不是全路徑)。如果留空則使用.list檔案裡的絕對全路徑。",
|
90 |
+
"多语种混合": "多語種混合",
|
91 |
+
"实际输入的参考文本:": "實際輸入的參考文本:",
|
92 |
+
"实际输入的目标文本(切句后):": "實際輸入的目標文本(切句後):",
|
93 |
+
"实际输入的目标文本(每句):": "實際輸入的目標文本(每句):",
|
94 |
+
"实际输入的目标文本:": "實際輸入的目標文本:",
|
95 |
+
"导出文件格式": "導出檔格式",
|
96 |
+
"开启GPT训练": "開啟GPT訓練",
|
97 |
+
"开启SSL提取": "開啟SSL提取",
|
98 |
+
"开启SoVITS训练": "開啟SoVITS訓練",
|
99 |
+
"开启一键三连": "開啟一鍵三連",
|
100 |
+
"开启文本获取": "開啟文本獲取",
|
101 |
+
"开启无参考文本模式。不填参考文本亦相当于开启。": "開啟無參考文本模式。不填參考文本亦相當於開啟。",
|
102 |
+
"开启离线批量ASR": "開啟離線批量ASR",
|
103 |
+
"开启语义token提取": "開啟語義token提取",
|
104 |
+
"开启语音切割": "開啟語音切割",
|
105 |
+
"开启语音降噪": "開啟語音降噪",
|
106 |
+
"怎么切": "怎麼切",
|
107 |
+
"总训练轮数total_epoch": "總訓練輪數total_epoch",
|
108 |
+
"总训练轮数total_epoch,不建议太高": "總訓練輪數total_epoch,不建議太高",
|
109 |
+
"打标工具WebUI已关闭": "打標工具WebUI已關閉",
|
110 |
+
"打标工具WebUI已开启": "打標工具WebUI已開啟",
|
111 |
+
"打标工具进程输出信息": "打標工具進程輸出資訊",
|
112 |
+
"指定输出主人声文件夹": "指定输出主人声文件夹",
|
113 |
+
"指定输出非主人声文件夹": "指定输出非主人声文件夹",
|
114 |
+
"按中文句号。切": "按中文句號。切",
|
115 |
+
"按标点符号切": "按標點符號切",
|
116 |
+
"按英文句号.切": "按英文句號.切",
|
117 |
+
"文本切分工具。太长的文本合成出来效果不一定好,所以太长建议先切。合成会根据文本的换行分开合成再拼起来。": "文本切分工具。太長的文本合成出來效果不一定好,所以太長建議先切。合成會根據文本的換行分開合成再拼起來。",
|
118 |
+
"文本模块学习率权重": "文本模塊學習率權重",
|
119 |
+
"文本进程输出信息": "文本進程輸出資訊",
|
120 |
+
"施工中,请静候佳音": "施工中,請靜候佳音",
|
121 |
+
"日文": "日文",
|
122 |
+
"日英混合": "日英混合",
|
123 |
+
"是否仅保存最新的ckpt文件以节省硬盘空间": "是否僅保存最新的ckpt文件以節省硬盤空間",
|
124 |
+
"是否在每次保存时间点将最终小模型保存至weights文件夹": "是否在每次保存時間點將最終小模型保存至weights文件夾",
|
125 |
+
"是否开启TTS推理WebUI": "是否開啟TTS推理WebUI",
|
126 |
+
"是否开启UVR5-WebUI": "是否開啟UVR5-WebUI",
|
127 |
+
"是否开启dpo训练选项(实验性)": "是否開啟dpo訓練選項(實驗性)",
|
128 |
+
"是否开启打标WebUI": "是否開啟打標WebUI",
|
129 |
+
"是否直接对上次合成结果调整语速。防止随机性。": "是否直接對上次合成結果調整語速。防止隨機性。",
|
130 |
+
"显卡信息": "顯卡資訊",
|
131 |
+
"本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. <br>如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录<b>LICENSE</b>.": "本軟體以MIT協議開源,作者不對軟體具備任何控制力,使用軟體者、傳播軟體導出的聲音者自負全責。<br>如不認可該條款,則不能使用或引用軟體包內任何代碼和文件。詳見根目錄<b>LICENSE</b>。",
|
132 |
+
"模型": "模型",
|
133 |
+
"模型分为三类:": "模型分為三類:",
|
134 |
+
"模型切换": "模型切換",
|
135 |
+
"每张显卡的batch_size": "每張顯卡的batch_size",
|
136 |
+
"终止ASR进程": "終止ASR進程",
|
137 |
+
"终止GPT训练": "終止GPT訓練",
|
138 |
+
"终止SSL提取进程": "終止SSL提取進程",
|
139 |
+
"终止SoVITS训练": "終止SoVITS訓練",
|
140 |
+
"终止一键三连": "終止一鍵三連",
|
141 |
+
"终止文本获取进程": "終止文本獲取進程",
|
142 |
+
"终止语义token提取进程": "終止語義token提取進程",
|
143 |
+
"终止语音切割": "終止語音切割",
|
144 |
+
"终止语音降噪进程": "終止語音降噪進程",
|
145 |
+
"英文": "英文",
|
146 |
+
"语义token提取进程输出信息": "語義token提取進程輸出資訊",
|
147 |
+
"语速": "語速",
|
148 |
+
"语速调整,高为更快": "調整語速,高為更快",
|
149 |
+
"语音切割进程输出信息": "語音切割進程輸出資訊",
|
150 |
+
"语音降噪进程输出信息": "語音降噪進程輸出資訊",
|
151 |
+
"请上传3~10秒内参考音频,超过会报错!": "請上傳3~10秒內參考音頻,超過會報錯!",
|
152 |
+
"请输入有效文本": "請輸入有效文本",
|
153 |
+
"转换": "轉換",
|
154 |
+
"输入待处理音频文件夹路径": "輸入待處理音頻資料夾路徑",
|
155 |
+
"输入文件夹路径": "輸入文件夾路徑",
|
156 |
+
"输出logs/实验名目录下应有23456开头的文件和文件夹": "輸出logs/實驗名目錄下應有23456開頭的文件和文件夾",
|
157 |
+
"输出信息": "輸出訊息",
|
158 |
+
"输出文件夹路径": "輸出文件夾路徑",
|
159 |
+
"输出的语音": "輸出的語音",
|
160 |
+
"选择训练完存放在SoVITS_weights和GPT_weights下的模型。默认的一个是底模,体验5秒Zero Shot TTS用。": "選擇訓練完存放在SoVITS_weights和GPT_weights下的模型。默認的一個是底模,體驗5秒Zero Shot TTS用。",
|
161 |
+
"降噪结果输出文件夹": "降噪結果輸出文件夾",
|
162 |
+
"降噪音频文件输入文件夹": "降噪音頻文件輸入文件夾",
|
163 |
+
"需要合成的切分前文本": "需要合成的切分前文本",
|
164 |
+
"需要合成的文本": "需要合成的文本",
|
165 |
+
"需要合成的语种": "需要合成的語種",
|
166 |
+
"音频自动切分输入路径,可文件可文件夹": "音頻自動切分輸入路徑,可文件可文件夾",
|
167 |
+
"预训练的GPT模型路径": "預訓練的GPT模型路徑",
|
168 |
+
"预训练的SSL模型路径": "預訓練的SSL模型路徑",
|
169 |
+
"预训练的SoVITS-D模型路径": "預訓練的SoVITS-D模型路徑",
|
170 |
+
"预训练的SoVITS-G模型路径": "預訓練的SoVITS-G模型路徑",
|
171 |
+
"预训练的中文BERT模型路径": "預訓練的中文BERT模型路徑"
|
172 |
+
}
|
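All of the locale files above are expected to carry the same key set as zh_CN.json, whose entries serve as both key and value. A quick hand-rolled parity check along these lines can catch drift; it is an illustrative sketch, not part of the repository, and the scan_i18n.py tool added next performs the authoritative check against the keys actually used in the code.

```python
import glob
import json
import os

locale_dir = "tools/i18n/locale"
# zh_CN.json is the source-language reference table.
with open(os.path.join(locale_dir, "zh_CN.json"), encoding="utf-8") as f:
    reference = set(json.load(f))

for path in sorted(glob.glob(os.path.join(locale_dir, "*.json"))):
    with open(path, encoding="utf-8") as f:
        keys = set(json.load(f))
    missing, extra = reference - keys, keys - reference
    if missing or extra:
        print(f"{os.path.basename(path)}: {len(missing)} missing, {len(extra)} extra")
```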
tools/i18n/scan_i18n.py
ADDED
@@ -0,0 +1,119 @@
|
1 |
+
import ast
|
2 |
+
import glob
|
3 |
+
import json
|
4 |
+
import os
|
5 |
+
from collections import OrderedDict
|
6 |
+
|
7 |
+
I18N_JSON_DIR : os.PathLike = os.path.join(os.path.dirname(os.path.relpath(__file__)), 'locale')
|
8 |
+
DEFAULT_LANGUAGE: str = "zh_CN" # default language
|
9 |
+
TITLE_LEN : int = 60 # display width of section titles
|
10 |
+
KEY_LEN : int = 30 # display width of key names
|
11 |
+
SHOW_KEYS : bool = False # whether to print per-key details
|
12 |
+
|
13 |
+
def extract_i18n_strings(node):
|
14 |
+
i18n_strings = []
|
15 |
+
|
16 |
+
if (
|
17 |
+
isinstance(node, ast.Call)
|
18 |
+
and isinstance(node.func, ast.Name)
|
19 |
+
and node.func.id == "i18n"
|
20 |
+
):
|
21 |
+
for arg in node.args:
|
22 |
+
if isinstance(arg, ast.Str):
|
23 |
+
i18n_strings.append(arg.s)
|
24 |
+
|
25 |
+
for child_node in ast.iter_child_nodes(node):
|
26 |
+
i18n_strings.extend(extract_i18n_strings(child_node))
|
27 |
+
|
28 |
+
return i18n_strings
|
29 |
+
|
30 |
+
def scan_i18n_strings():
|
31 |
+
"""
|
32 |
+
scan the directory for all .py files (recursively)
|
33 |
+
for each file, parse the code into an AST
|
34 |
+
for each AST, extract the i18n strings
|
35 |
+
"""
|
36 |
+
strings = []
|
37 |
+
print(" Scanning Files and Extracting i18n Strings ".center(TITLE_LEN, "="))
|
38 |
+
for filename in glob.iglob("**/*.py", recursive=True):
|
39 |
+
with open(filename, "r", encoding="utf-8") as f:
|
40 |
+
code = f.read()
|
41 |
+
if "I18nAuto" in code:
|
42 |
+
tree = ast.parse(code)
|
43 |
+
i18n_strings = extract_i18n_strings(tree)
|
44 |
+
print(f"{filename.ljust(30)}: {len(i18n_strings)}")
|
45 |
+
strings.extend(i18n_strings)
|
46 |
+
|
47 |
+
code_keys = set(strings)
|
48 |
+
print(f"{'Total Unique'.ljust(30)}: {len(code_keys)}")
|
49 |
+
return code_keys
|
50 |
+
|
51 |
+
def update_i18n_json(json_file, standard_keys):
|
52 |
+
print(f" Process {json_file} ".center(TITLE_LEN, "="))
|
53 |
+
# Read the JSON file
|
54 |
+
with open(json_file, "r", encoding="utf-8") as f:
|
55 |
+
json_data = json.load(f, object_pairs_hook=OrderedDict)
|
56 |
+
# Print the number of JSON entries before processing
|
57 |
+
len_before = len(json_data)
|
58 |
+
print(f"{'Total Keys'.ljust(KEY_LEN)}: {len_before}")
|
59 |
+
# Identify missing keys and fill them in
|
60 |
+
miss_keys = set(standard_keys) - set(json_data.keys())
|
61 |
+
if len(miss_keys) > 0:
|
62 |
+
print(f"{'Missing Keys (+)'.ljust(KEY_LEN)}: {len(miss_keys)}")
|
63 |
+
for key in miss_keys:
|
64 |
+
if DEFAULT_LANGUAGE in json_file:
|
65 |
+
# For the default language, the value equals the key.
|
66 |
+
json_data[key] = key
|
67 |
+
else:
|
68 |
+
# For other languages, set the value to "#!" + key to mark it as untranslated.
|
69 |
+
json_data[key] = "#!" + key
|
70 |
+
if SHOW_KEYS:
|
71 |
+
print(f"{'Added Missing Key'.ljust(KEY_LEN)}: {key}")
|
72 |
+
# Identify unused keys and delete them
|
73 |
+
diff_keys = set(json_data.keys()) - set(standard_keys)
|
74 |
+
if len(diff_keys) > 0:
|
75 |
+
print(f"{'Unused Keys (-)'.ljust(KEY_LEN)}: {len(diff_keys)}")
|
76 |
+
for key in diff_keys:
|
77 |
+
del json_data[key]
|
78 |
+
if SHOW_KEYS:
|
79 |
+
print(f"{'Removed Unused Key'.ljust(KEY_LEN)}: {key}")
|
80 |
+
# Sort in the order of the standard keys
|
81 |
+
json_data = OrderedDict(
|
82 |
+
sorted(json_data.items(),
|
83 |
+
key=lambda x: list(standard_keys).index(x[0])))
|
84 |
+
# Print the number of JSON entries after processing
|
85 |
+
if len(miss_keys) != 0 or len(diff_keys) != 0:
|
86 |
+
print(f"{'Total Keys (After)'.ljust(KEY_LEN)}: {len(json_data)}")
|
87 |
+
# Identify keys still awaiting translation
|
88 |
+
num_miss_translation = 0
|
89 |
+
duplicate_items = {}
|
90 |
+
for key, value in json_data.items():
|
91 |
+
if value.startswith("#!"):
|
92 |
+
num_miss_translation += 1
|
93 |
+
if SHOW_KEYS:
|
94 |
+
print(f"{'Missing Translation'.ljust(KEY_LEN)}: {key}")
|
95 |
+
if value in duplicate_items:
|
96 |
+
duplicate_items[value].append(key)
|
97 |
+
else:
|
98 |
+
duplicate_items[value] = [key]
|
99 |
+
# Report any duplicated values
|
100 |
+
for value, keys in duplicate_items.items():
|
101 |
+
if len(keys) > 1:
|
102 |
+
print("\n".join([f"\033[31m{'[Failed] Duplicate Value'.ljust(KEY_LEN)}: {key} -> {value}\033[0m" for key in keys]))
|
103 |
+
|
104 |
+
if num_miss_translation > 0:
|
105 |
+
print(f"\033[31m{'[Failed] Missing Translation'.ljust(KEY_LEN)}: {num_miss_translation}\033[0m")
|
106 |
+
else:
|
107 |
+
print(f"\033[32m[Passed] All Keys Translated\033[0m")
|
108 |
+
# Write the processed result back to the JSON file
|
109 |
+
with open(json_file, "w", encoding="utf-8") as f:
|
110 |
+
json.dump(json_data, f, ensure_ascii=False, indent=4, sort_keys=True)
|
111 |
+
f.write("\n")
|
112 |
+
print(f" Updated {json_file} ".center(TITLE_LEN, "=") + '\n')
|
113 |
+
|
114 |
+
if __name__ == "__main__":
|
115 |
+
code_keys = scan_i18n_strings()
|
116 |
+
for json_file in os.listdir(I18N_JSON_DIR):
|
117 |
+
if json_file.endswith(r".json"):
|
118 |
+
json_file = os.path.join(I18N_JSON_DIR, json_file)
|
119 |
+
update_i18n_json(json_file, code_keys)
|
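As the script above shows, untranslated entries are flagged by prefixing the value with "#!" (the default zh_CN locale instead gets value == key). A small illustrative helper, not part of the repository, for listing what still needs attention in one locale file:

```python
import json


def untranslated(json_path: str) -> list[str]:
    """Return the keys whose values scan_i18n.py flagged with the "#!" marker."""
    with open(json_path, encoding="utf-8") as f:
        data = json.load(f)
    return [key for key, value in data.items() if value.startswith("#!")]


for key in untranslated("tools/i18n/locale/tr_TR.json"):
    print(key)
```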
tools/my_utils.py
ADDED
@@ -0,0 +1,33 @@
|
1 |
+
import platform,os,traceback
|
2 |
+
import ffmpeg
|
3 |
+
import numpy as np
|
4 |
+
|
5 |
+
|
6 |
+
def load_audio(file, sr):
|
7 |
+
try:
|
8 |
+
# https://github.com/openai/whisper/blob/main/whisper/audio.py#L26
|
9 |
+
# This launches a subprocess to decode audio while down-mixing and resampling as necessary.
|
10 |
+
# Requires the ffmpeg CLI and `ffmpeg-python` package to be installed.
|
11 |
+
file = clean_path(file) # guard against pasted paths carrying stray spaces, quotes, or newlines
|
12 |
+
if not os.path.exists(file):
|
13 |
+
raise RuntimeError(
|
14 |
+
"You input a wrong audio path that does not exists, please fix it!"
|
15 |
+
)
|
16 |
+
out, _ = (
|
17 |
+
ffmpeg.input(file, threads=0)
|
18 |
+
.output("-", format="f32le", acodec="pcm_f32le", ac=1, ar=sr)
|
19 |
+
.run(cmd=["ffmpeg", "-nostdin"], capture_stdout=True, capture_stderr=True)
|
20 |
+
)
|
21 |
+
except Exception as e:
|
22 |
+
traceback.print_exc()
|
23 |
+
raise RuntimeError(f"Failed to load audio: {e}")
|
24 |
+
|
25 |
+
return np.frombuffer(out, np.float32).flatten()
|
26 |
+
|
27 |
+
|
28 |
+
def clean_path(path_str:str):
|
29 |
+
if path_str.endswith(('\\','/')):
|
30 |
+
return clean_path(path_str[0:-1])
|
31 |
+
if platform.system() == 'Windows':
|
32 |
+
path_str = path_str.replace('/', '\\')
|
33 |
+
return path_str.strip(" ").strip('"').strip("\n").strip('"').strip(" ").strip("\u202a")
|
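A short usage sketch for load_audio. It requires the ffmpeg CLI plus the ffmpeg-python package, returns mono float32 PCM at the requested rate, and the path below is a placeholder:

```python
from tools.my_utils import load_audio

# clean_path() inside load_audio strips the stray quotes, spaces, and newlines
# that often come along with a copy-pasted path; decoding then runs through ffmpeg.
audio = load_audio("/path/to/input.wav", sr=32000)  # placeholder path
print(audio.dtype, audio.shape)  # float32, (num_samples,)
```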
tools/slice_audio.py
ADDED
@@ -0,0 +1,48 @@
|
1 |
+
import os,sys,numpy as np
|
2 |
+
import traceback
|
3 |
+
from scipy.io import wavfile
|
4 |
+
# parent_directory = os.path.dirname(os.path.abspath(__file__))
|
5 |
+
# sys.path.append(parent_directory)
|
6 |
+
from tools.my_utils import load_audio
|
7 |
+
from slicer2 import Slicer
|
8 |
+
|
9 |
+
def slice(inp,opt_root,threshold,min_length,min_interval,hop_size,max_sil_kept,_max,alpha,i_part,all_part):
|
10 |
+
os.makedirs(opt_root,exist_ok=True)
|
11 |
+
if os.path.isfile(inp):
|
12 |
+
input=[inp]
|
13 |
+
elif os.path.isdir(inp):
|
14 |
+
input=[os.path.join(inp, name) for name in sorted(list(os.listdir(inp)))]
|
15 |
+
else:
|
16 |
+
return "输入路径存在但既不是文件也不是文件夹"
|
17 |
+
slicer = Slicer(
|
18 |
+
sr=32000, # sample rate of the long audio
|
19 |
+
threshold= int(threshold), # volume below this value is treated as silence, a candidate cut point
|
20 |
+
min_length= int(min_length), # minimum length of each clip; a too-short first clip keeps merging with the following ones until it exceeds this
|
21 |
+
min_interval= int(min_interval), # minimum silence interval to cut on
|
22 |
+
hop_size= int(hop_size), # hop size of the volume curve; smaller means finer resolution but more compute (finer is not always better)
|
23 |
+
max_sil_kept= int(max_sil_kept), # maximum silence kept after cutting
|
24 |
+
)
|
25 |
+
_max=float(_max)
|
26 |
+
alpha=float(alpha)
|
27 |
+
for inp_path in input[int(i_part)::int(all_part)]:
|
28 |
+
# print(inp_path)
|
29 |
+
try:
|
30 |
+
name = os.path.basename(inp_path)
|
31 |
+
audio = load_audio(inp_path, 32000)
|
32 |
+
# print(audio.shape)
|
33 |
+
for chunk, start, end in slicer.slice(audio): # start and end are sample offsets
|
34 |
+
tmp_max = np.abs(chunk).max()
|
35 |
+
if(tmp_max>1):chunk/=tmp_max
|
36 |
+
chunk = (chunk / tmp_max * (_max * alpha)) + (1 - alpha) * chunk
|
37 |
+
wavfile.write(
|
38 |
+
"%s/%s_%010d_%010d.wav" % (opt_root, name, start, end),
|
39 |
+
32000,
|
40 |
+
# chunk.astype(np.float32),
|
41 |
+
(chunk * 32767).astype(np.int16),
|
42 |
+
)
|
43 |
+
except:
|
44 |
+
print(inp_path,"->fail->",traceback.format_exc())
|
45 |
+
return "执行完毕,请检查输出文件"
|
46 |
+
|
47 |
+
print(slice(*sys.argv[1:]))
|
48 |
+
|
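Because the final print(slice(*sys.argv[1:])) runs at import time, this script is meant to be launched as a subprocess rather than imported. The positional arguments are, in order: inp, opt_root, threshold, min_length, min_interval, hop_size, max_sil_kept, _max, alpha, i_part, all_part. A hedged invocation sketch follows; the paths and numeric values are illustrative, and the repo root is put on PYTHONPATH so that the script's tools.my_utils import resolves:

```python
import os
import subprocess

env = dict(os.environ, PYTHONPATH=os.getcwd())  # run from the repo root
subprocess.run(
    [
        "python", "tools/slice_audio.py",
        "raw_audio/",       # input: a single file or a folder
        "output/sliced/",   # output root for the sub-clips
        "-34", "4000", "300", "10", "500",  # threshold, min_length, min_interval, hop_size, max_sil_kept
        "0.9", "0.25",      # _max, alpha: normalization target and mix ratio
        "0", "1",           # i_part, all_part: shard 0 of 1 worker
    ],
    check=True,
    env=env,
)
```

The i_part/all_part pair exists so a caller can shard one folder across several processes, each handling input[i_part::all_part].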
tools/slicer2.py
ADDED
@@ -0,0 +1,261 @@
|
1 |
+
import numpy as np
|
2 |
+
|
3 |
+
|
4 |
+
# This function is obtained from librosa.
|
5 |
+
def get_rms(
|
6 |
+
y,
|
7 |
+
frame_length=2048,
|
8 |
+
hop_length=512,
|
9 |
+
pad_mode="constant",
|
10 |
+
):
|
11 |
+
padding = (int(frame_length // 2), int(frame_length // 2))
|
12 |
+
y = np.pad(y, padding, mode=pad_mode)
|
13 |
+
|
14 |
+
axis = -1
|
15 |
+
# put our new within-frame axis at the end for now
|
16 |
+
out_strides = y.strides + tuple([y.strides[axis]])
|
17 |
+
# Reduce the shape on the framing axis
|
18 |
+
x_shape_trimmed = list(y.shape)
|
19 |
+
x_shape_trimmed[axis] -= frame_length - 1
|
20 |
+
out_shape = tuple(x_shape_trimmed) + tuple([frame_length])
|
21 |
+
xw = np.lib.stride_tricks.as_strided(y, shape=out_shape, strides=out_strides)
|
22 |
+
if axis < 0:
|
23 |
+
target_axis = axis - 1
|
24 |
+
else:
|
25 |
+
target_axis = axis + 1
|
26 |
+
xw = np.moveaxis(xw, -1, target_axis)
|
27 |
+
# Downsample along the target axis
|
28 |
+
slices = [slice(None)] * xw.ndim
|
29 |
+
slices[axis] = slice(0, None, hop_length)
|
30 |
+
x = xw[tuple(slices)]
|
31 |
+
|
32 |
+
# Calculate power
|
33 |
+
power = np.mean(np.abs(x) ** 2, axis=-2, keepdims=True)
|
34 |
+
|
35 |
+
return np.sqrt(power)
|
36 |
+
|
37 |
+
|
38 |
+
class Slicer:
|
39 |
+
def __init__(
|
40 |
+
self,
|
41 |
+
sr: int,
|
42 |
+
threshold: float = -40.0,
|
43 |
+
min_length: int = 5000,
|
44 |
+
min_interval: int = 300,
|
45 |
+
hop_size: int = 20,
|
46 |
+
max_sil_kept: int = 5000,
|
47 |
+
):
|
48 |
+
if not min_length >= min_interval >= hop_size:
|
49 |
+
raise ValueError(
|
50 |
+
"The following condition must be satisfied: min_length >= min_interval >= hop_size"
|
51 |
+
)
|
52 |
+
if not max_sil_kept >= hop_size:
|
53 |
+
raise ValueError(
|
54 |
+
"The following condition must be satisfied: max_sil_kept >= hop_size"
|
55 |
+
)
|
56 |
+
min_interval = sr * min_interval / 1000
|
57 |
+
self.threshold = 10 ** (threshold / 20.0)
|
58 |
+
self.hop_size = round(sr * hop_size / 1000)
|
59 |
+
self.win_size = min(round(min_interval), 4 * self.hop_size)
|
60 |
+
self.min_length = round(sr * min_length / 1000 / self.hop_size)
|
61 |
+
self.min_interval = round(min_interval / self.hop_size)
|
62 |
+
self.max_sil_kept = round(sr * max_sil_kept / 1000 / self.hop_size)
|
63 |
+
|
64 |
+
def _apply_slice(self, waveform, begin, end):
|
65 |
+
if len(waveform.shape) > 1:
|
66 |
+
return waveform[
|
67 |
+
:, begin * self.hop_size : min(waveform.shape[1], end * self.hop_size)
|
68 |
+
]
|
69 |
+
else:
|
70 |
+
return waveform[
|
71 |
+
begin * self.hop_size : min(waveform.shape[0], end * self.hop_size)
|
72 |
+
]
|
73 |
+
|
74 |
+
# @timeit
|
75 |
+
def slice(self, waveform):
|
76 |
+
if len(waveform.shape) > 1:
|
77 |
+
samples = waveform.mean(axis=0)
|
78 |
+
else:
|
79 |
+
samples = waveform
|
80 |
+
if samples.shape[0] <= self.min_length:
|
81 |
+
return [waveform]
|
82 |
+
rms_list = get_rms(
|
83 |
+
y=samples, frame_length=self.win_size, hop_length=self.hop_size
|
84 |
+
).squeeze(0)
|
85 |
+
sil_tags = []
|
86 |
+
silence_start = None
|
87 |
+
clip_start = 0
|
88 |
+
for i, rms in enumerate(rms_list):
|
89 |
+
# Keep looping while frame is silent.
|
90 |
+
if rms < self.threshold:
|
91 |
+
# Record start of silent frames.
|
92 |
+
if silence_start is None:
|
93 |
+
silence_start = i
|
94 |
+
continue
|
95 |
+
# Keep looping while frame is not silent and silence start has not been recorded.
|
96 |
+
if silence_start is None:
|
97 |
+
continue
|
98 |
+
# Clear recorded silence start if interval is not enough or clip is too short
|
99 |
+
is_leading_silence = silence_start == 0 and i > self.max_sil_kept
|
100 |
+
need_slice_middle = (
|
101 |
+
i - silence_start >= self.min_interval
|
102 |
+
and i - clip_start >= self.min_length
|
103 |
+
)
|
104 |
+
if not is_leading_silence and not need_slice_middle:
|
105 |
+
silence_start = None
|
106 |
+
continue
|
107 |
+
# Need slicing. Record the range of silent frames to be removed.
|
108 |
+
if i - silence_start <= self.max_sil_kept:
|
109 |
+
pos = rms_list[silence_start : i + 1].argmin() + silence_start
|
110 |
+
if silence_start == 0:
|
111 |
+
sil_tags.append((0, pos))
|
112 |
+
else:
|
113 |
+
sil_tags.append((pos, pos))
|
114 |
+
clip_start = pos
|
115 |
+
elif i - silence_start <= self.max_sil_kept * 2:
|
116 |
+
pos = rms_list[
|
117 |
+
i - self.max_sil_kept : silence_start + self.max_sil_kept + 1
|
118 |
+
].argmin()
|
119 |
+
pos += i - self.max_sil_kept
|
120 |
+
pos_l = (
|
121 |
+
rms_list[
|
122 |
+
silence_start : silence_start + self.max_sil_kept + 1
|
123 |
+
].argmin()
|
124 |
+
+ silence_start
|
125 |
+
)
|
126 |
+
pos_r = (
|
127 |
+
rms_list[i - self.max_sil_kept : i + 1].argmin()
|
128 |
+
+ i
|
129 |
+
- self.max_sil_kept
|
130 |
+
)
|
131 |
+
if silence_start == 0:
|
132 |
+
sil_tags.append((0, pos_r))
|
133 |
+
clip_start = pos_r
|
134 |
+
else:
|
135 |
+
sil_tags.append((min(pos_l, pos), max(pos_r, pos)))
|
136 |
+
clip_start = max(pos_r, pos)
|
137 |
+
else:
|
138 |
+
pos_l = (
|
139 |
+
rms_list[
|
140 |
+
silence_start : silence_start + self.max_sil_kept + 1
|
141 |
+
].argmin()
|
142 |
+
+ silence_start
|
143 |
+
)
|
144 |
+
pos_r = (
|
145 |
+
rms_list[i - self.max_sil_kept : i + 1].argmin()
|
146 |
+
+ i
|
147 |
+
- self.max_sil_kept
|
148 |
+
)
|
149 |
+
if silence_start == 0:
|
150 |
+
sil_tags.append((0, pos_r))
|
151 |
+
else:
|
152 |
+
sil_tags.append((pos_l, pos_r))
|
153 |
+
clip_start = pos_r
|
154 |
+
silence_start = None
|
155 |
+
# Deal with trailing silence.
|
156 |
+
total_frames = rms_list.shape[0]
|
157 |
+
if (
|
158 |
+
silence_start is not None
|
159 |
+
and total_frames - silence_start >= self.min_interval
|
160 |
+
):
|
161 |
+
silence_end = min(total_frames, silence_start + self.max_sil_kept)
|
162 |
+
pos = rms_list[silence_start : silence_end + 1].argmin() + silence_start
|
163 |
+
sil_tags.append((pos, total_frames + 1))
|
164 |
+
# Apply and return slices.
|
165 |
+
#### audio + start offset + end offset
|
166 |
+
if len(sil_tags) == 0:
|
167 |
+
return [[waveform,0,int(total_frames*self.hop_size)]]
|
168 |
+
else:
|
169 |
+
chunks = []
|
170 |
+
if sil_tags[0][0] > 0:
|
171 |
+
chunks.append([self._apply_slice(waveform, 0, sil_tags[0][0]),0,int(sil_tags[0][0]*self.hop_size)])
|
172 |
+
for i in range(len(sil_tags) - 1):
|
173 |
+
chunks.append(
|
174 |
+
[self._apply_slice(waveform, sil_tags[i][1], sil_tags[i + 1][0]),int(sil_tags[i][1]*self.hop_size),int(sil_tags[i + 1][0]*self.hop_size)]
|
175 |
+
)
|
176 |
+
if sil_tags[-1][1] < total_frames:
|
177 |
+
chunks.append(
|
178 |
+
[self._apply_slice(waveform, sil_tags[-1][1], total_frames),int(sil_tags[-1][1]*self.hop_size),int(total_frames*self.hop_size)]
|
179 |
+
)
|
180 |
+
return chunks
|
181 |
+
|
182 |
+
|
183 |
+
def main():
|
184 |
+
import os.path
|
185 |
+
from argparse import ArgumentParser
|
186 |
+
|
187 |
+
import librosa
|
188 |
+
import soundfile
|
189 |
+
|
190 |
+
parser = ArgumentParser()
|
191 |
+
parser.add_argument("audio", type=str, help="The audio to be sliced")
|
192 |
+
parser.add_argument(
|
193 |
+
"--out", type=str, help="Output directory of the sliced audio clips"
|
194 |
+
)
|
195 |
+
parser.add_argument(
|
196 |
+
"--db_thresh",
|
197 |
+
type=float,
|
198 |
+
required=False,
|
199 |
+
default=-40,
|
200 |
+
help="The dB threshold for silence detection",
|
201 |
+
)
|
202 |
+
parser.add_argument(
|
203 |
+
"--min_length",
|
204 |
+
type=int,
|
205 |
+
required=False,
|
206 |
+
default=5000,
|
207 |
+
help="The minimum milliseconds required for each sliced audio clip",
|
208 |
+
)
|
209 |
+
parser.add_argument(
|
210 |
+
"--min_interval",
|
211 |
+
type=int,
|
212 |
+
required=False,
|
213 |
+
default=300,
|
214 |
+
help="The minimum milliseconds for a silence part to be sliced",
|
215 |
+
)
|
216 |
+
parser.add_argument(
|
217 |
+
"--hop_size",
|
218 |
+
type=int,
|
219 |
+
required=False,
|
220 |
+
default=10,
|
221 |
+
help="Frame length in milliseconds",
|
222 |
+
)
|
223 |
+
parser.add_argument(
|
224 |
+
"--max_sil_kept",
|
225 |
+
type=int,
|
226 |
+
required=False,
|
227 |
+
default=500,
|
228 |
+
help="The maximum silence length kept around the sliced clip, presented in milliseconds",
|
229 |
+
)
|
230 |
+
args = parser.parse_args()
|
231 |
+
out = args.out
|
232 |
+
if out is None:
|
233 |
+
out = os.path.dirname(os.path.abspath(args.audio))
|
234 |
+
audio, sr = librosa.load(args.audio, sr=None, mono=False)
|
235 |
+
slicer = Slicer(
|
236 |
+
sr=sr,
|
237 |
+
threshold=args.db_thresh,
|
238 |
+
min_length=args.min_length,
|
239 |
+
min_interval=args.min_interval,
|
240 |
+
hop_size=args.hop_size,
|
241 |
+
max_sil_kept=args.max_sil_kept,
|
242 |
+
)
|
243 |
+
chunks = slicer.slice(audio)
|
244 |
+
if not os.path.exists(out):
|
245 |
+
os.makedirs(out)
|
246 |
+
for i, chunk in enumerate(chunks):
|
247 |
+
if len(chunk.shape) > 1:
|
248 |
+
chunk = chunk.T
|
249 |
+
soundfile.write(
|
250 |
+
os.path.join(
|
251 |
+
out,
|
252 |
+
f"%s_%d.wav"
|
253 |
+
% (os.path.basename(args.audio).rsplit(".", maxsplit=1)[0], i),
|
254 |
+
),
|
255 |
+
chunk,
|
256 |
+
sr,
|
257 |
+
)
|
258 |
+
|
259 |
+
|
260 |
+
if __name__ == "__main__":
|
261 |
+
main()
|
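For completeness, a hedged sketch of driving the Slicer class directly, mirroring how slice_audio.py uses it. The parameter values and path are illustrative; note that slice() returns [chunk, start, end] entries (start/end are sample offsets) on its normal path but returns the bare waveform when the input is shorter than min_length, so the loop below assumes a sufficiently long recording.

```python
import numpy as np
from scipy.io import wavfile

from tools.my_utils import load_audio
from tools.slicer2 import Slicer

audio = load_audio("/path/to/long_recording.wav", 32000)  # placeholder path
slicer = Slicer(
    sr=32000,
    threshold=-34,     # dB; quieter frames become candidate cut points
    min_length=4000,   # ms
    min_interval=300,  # ms
    hop_size=10,       # ms
    max_sil_kept=500,  # ms
)
for chunk, start, end in slicer.slice(audio):
    peak = np.abs(chunk).max()
    if peak > 1:  # avoid clipping before the int16 conversion
        chunk = chunk / peak
    wavfile.write(f"clip_{start:010d}_{end:010d}.wav", 32000, (chunk * 32767).astype(np.int16))
```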
tools/subfix_webui.py
ADDED
@@ -0,0 +1,498 @@
|
import argparse
import copy
import json
import os
import uuid

import librosa
import gradio as gr
import numpy as np
import soundfile

g_json_key_text = ""
g_json_key_path = ""
g_load_file = ""
g_load_format = ""

g_max_json_index = 0
g_index = 0
g_batch = 10
g_text_list = []
g_audio_list = []
g_checkbox_list = []
g_data_json = []


def reload_data(index, batch):
    global g_index
    g_index = index
    global g_batch
    g_batch = batch
    datas = g_data_json[index:index + batch]
    output = []
    for d in datas:
        output.append(
            {
                g_json_key_text: d[g_json_key_text],
                g_json_key_path: d[g_json_key_path]
            }
        )
    return output


def b_change_index(index, batch):
    global g_index, g_batch
    g_index, g_batch = index, batch
    datas = reload_data(index, batch)
    output = []
    # gr.update()-style dicts: one textbox update per loaded row
    for i, _ in enumerate(datas):
        output.append(
            {
                "__type__": "update",
                "label": f"Text {i+index}",
                "value": _[g_json_key_text]
            }
        )
    # pad the remaining slots with empty textboxes
    for _ in range(g_batch - len(datas)):
        output.append(
            {
                "__type__": "update",
                "label": "Text",
                "value": ""
            }
        )
    for _ in datas:
        output.append(_[g_json_key_path])
    for _ in range(g_batch - len(datas)):
        output.append(None)
    for _ in range(g_batch):
        output.append(False)
    return output


def b_next_index(index, batch):
    b_save_file()
    if (index + batch) <= g_max_json_index:
        return index + batch, *b_change_index(index + batch, batch)
    else:
        return index, *b_change_index(index, batch)


def b_previous_index(index, batch):
    b_save_file()
    if (index - batch) >= 0:
        return index - batch, *b_change_index(index - batch, batch)
    else:
        return 0, *b_change_index(0, batch)


def b_submit_change(*text_list):
    global g_data_json
    change = False
    for i, new_text in enumerate(text_list):
        if g_index + i <= g_max_json_index:
            new_text = new_text.strip() + ' '
            if g_data_json[g_index + i][g_json_key_text] != new_text:
                g_data_json[g_index + i][g_json_key_text] = new_text
                change = True
    if change:
        b_save_file()
    return g_index, *b_change_index(g_index, g_batch)


def b_delete_audio(*checkbox_list):
    global g_data_json, g_index, g_max_json_index
    b_save_file()
    change = False
    # iterate in reverse so pop() does not shift the remaining indices
    for i, checkbox in reversed(list(enumerate(checkbox_list))):
        if g_index + i < len(g_data_json):
            if checkbox == True:
                g_data_json.pop(g_index + i)
                change = True

    g_max_json_index = len(g_data_json) - 1
    if g_index > g_max_json_index:
        g_index = g_max_json_index
    g_index = g_index if g_index >= 0 else 0
    if change:
        b_save_file()
    return {"value": g_index, "__type__": "update", "maximum": (g_max_json_index if g_max_json_index >= 0 else 0)}, *b_change_index(g_index, g_batch)


def b_invert_selection(*checkbox_list):
    new_list = [not item if item is True else True for item in checkbox_list]
    return new_list


def get_next_path(filename):
    base_dir = os.path.dirname(filename)
    base_name = os.path.splitext(os.path.basename(filename))[0]
    for i in range(100):
        new_path = os.path.join(base_dir, f"{base_name}_{str(i).zfill(2)}.wav")
        if not os.path.exists(new_path):
            return new_path
    return os.path.join(base_dir, f'{str(uuid.uuid4())}.wav')


def b_audio_split(audio_breakpoint, *checkbox_list):
    global g_data_json, g_max_json_index
    checked_index = []
    for i, checkbox in enumerate(checkbox_list):
        if checkbox == True and g_index + i < len(g_data_json):
            checked_index.append(g_index + i)
    if len(checked_index) == 1:
        index = checked_index[0]
        audio_json = copy.deepcopy(g_data_json[index])
        path = audio_json[g_json_key_path]
        data, sample_rate = librosa.load(path, sr=None, mono=True)
        audio_maxframe = len(data)
        break_frame = int(audio_breakpoint * sample_rate)

        if break_frame >= 1 and break_frame < audio_maxframe:
            audio_first = data[0:break_frame]
            audio_second = data[break_frame:]
            nextpath = get_next_path(path)
            soundfile.write(nextpath, audio_second, sample_rate)
            soundfile.write(path, audio_first, sample_rate)
            g_data_json.insert(index + 1, audio_json)
            g_data_json[index + 1][g_json_key_path] = nextpath
            b_save_file()

    g_max_json_index = len(g_data_json) - 1
    return {"value": g_index, "maximum": g_max_json_index, "__type__": "update"}, *b_change_index(g_index, g_batch)


def b_merge_audio(interval_r, *checkbox_list):
    global g_data_json, g_max_json_index
    b_save_file()
    checked_index = []
    audios_path = []
    audios_text = []
    for i, checkbox in enumerate(checkbox_list):
        if checkbox == True and g_index + i < len(g_data_json):
            checked_index.append(g_index + i)

    if len(checked_index) > 1:
        for i in checked_index:
            audios_path.append(g_data_json[i][g_json_key_path])
            audios_text.append(g_data_json[i][g_json_key_text])
        for i in reversed(checked_index[1:]):
            g_data_json.pop(i)

        base_index = checked_index[0]
        base_path = audios_path[0]
        g_data_json[base_index][g_json_key_text] = "".join(audios_text)

        audio_list = []
        l_sample_rate = None
        for i, path in enumerate(audios_path):
            data, sample_rate = librosa.load(path, sr=l_sample_rate, mono=True)
            l_sample_rate = sample_rate
            if i > 0:
                # insert interval_r seconds of silence between merged clips
                silence = np.zeros(int(l_sample_rate * interval_r))
                audio_list.append(silence)

            audio_list.append(data)

        audio_concat = np.concatenate(audio_list)

        soundfile.write(base_path, audio_concat, l_sample_rate)

    b_save_file()

    g_max_json_index = len(g_data_json) - 1

    return {"value": g_index, "maximum": g_max_json_index, "__type__": "update"}, *b_change_index(g_index, g_batch)


def b_save_json():
    with open(g_load_file, 'w', encoding="utf-8") as file:
        for data in g_data_json:
            file.write(f'{json.dumps(data, ensure_ascii=False)}\n')


def b_save_list():
    with open(g_load_file, 'w', encoding="utf-8") as file:
        for data in g_data_json:
            wav_path = data["wav_path"]
            speaker_name = data["speaker_name"]
            language = data["language"]
            text = data["text"]
            file.write(f"{wav_path}|{speaker_name}|{language}|{text}".strip() + '\n')


def b_load_json():
    global g_data_json, g_max_json_index
    with open(g_load_file, 'r', encoding="utf-8") as file:
        g_data_json = file.readlines()
        g_data_json = [json.loads(line) for line in g_data_json]
        g_max_json_index = len(g_data_json) - 1


def b_load_list():
    global g_data_json, g_max_json_index
    with open(g_load_file, 'r', encoding="utf-8") as source:
        data_list = source.readlines()
        for _ in data_list:
            data = _.split('|')
            if len(data) == 4:
                wav_path, speaker_name, language, text = data
                g_data_json.append(
                    {
                        'wav_path': wav_path,
                        'speaker_name': speaker_name,
                        'language': language,
                        'text': text.strip()
                    }
                )
            else:
                print("error line:", data)
        g_max_json_index = len(g_data_json) - 1


def b_save_file():
    if g_load_format == "json":
        b_save_json()
    elif g_load_format == "list":
        b_save_list()


def b_load_file():
    if g_load_format == "json":
        b_load_json()
    elif g_load_format == "list":
        b_load_list()


def set_global(load_json, load_list, json_key_text, json_key_path, batch):
    global g_json_key_text, g_json_key_path, g_load_file, g_load_format, g_batch

    g_batch = int(batch)

    if load_json != "None":
        g_load_format = "json"
        g_load_file = load_json
    elif load_list != "None":
        g_load_format = "list"
        g_load_file = load_list
    else:
        g_load_format = "list"
        g_load_file = "demo.list"

    g_json_key_text = json_key_text
    g_json_key_path = json_key_path

    b_load_file()


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Process some integers.')
    parser.add_argument('--load_json', default="None", help='source file, like demo.json')
    parser.add_argument('--is_share', default="False", help='whether webui is_share=True')
    parser.add_argument('--load_list', default="None", help='source file, like demo.list')
    parser.add_argument('--webui_port_subfix', default=9871, help='port for the subfix webui')
    parser.add_argument('--json_key_text', default="text", help='the text key name in json, Default: text')
    parser.add_argument('--json_key_path', default="wav_path", help='the path key name in json, Default: wav_path')
    parser.add_argument('--g_batch', default=10, help='max number g_batch wav to display, Default: 10')

    args = parser.parse_args()

    set_global(args.load_json, args.load_list, args.json_key_text, args.json_key_path, args.g_batch)

    with gr.Blocks() as demo:

        with gr.Row():
            btn_change_index = gr.Button("Change Index")
            btn_submit_change = gr.Button("Submit Text")
            btn_merge_audio = gr.Button("Merge Audio")
            btn_delete_audio = gr.Button("Delete Audio")
            btn_previous_index = gr.Button("Previous Index")
            btn_next_index = gr.Button("Next Index")

        with gr.Row():
            index_slider = gr.Slider(
                minimum=0, maximum=g_max_json_index, value=g_index, step=1, label="Index", scale=3
            )
            splitpoint_slider = gr.Slider(
                minimum=0, maximum=120.0, value=0, step=0.1, label="Audio Split Point(s)", scale=3
            )
            btn_audio_split = gr.Button("Split Audio", scale=1)
            btn_save_json = gr.Button("Save File", visible=True, scale=1)
            btn_invert_selection = gr.Button("Invert Selection", scale=1)

        with gr.Row():
            with gr.Column():
                for _ in range(0, g_batch):
                    with gr.Row():
                        text = gr.Textbox(
                            label="Text",
                            visible=True,
                            scale=5
                        )
                        audio_output = gr.Audio(
                            label="Output Audio",
                            visible=True,
                            scale=5
                        )
                        audio_check = gr.Checkbox(
                            label="Yes",
                            show_label=True,
                            info="Choose Audio",
                            scale=1
                        )
                        g_text_list.append(text)
                        g_audio_list.append(audio_output)
                        g_checkbox_list.append(audio_check)

        with gr.Row():
            batchsize_slider = gr.Slider(
                minimum=1, maximum=g_batch, value=g_batch, step=1, label="Batch Size", scale=3, interactive=False
            )
            interval_slider = gr.Slider(
                minimum=0, maximum=2, value=0, step=0.01, label="Interval", scale=3
            )
            btn_theme_dark = gr.Button("Light Theme", link="?__theme=light", scale=1)
            btn_theme_light = gr.Button("Dark Theme", link="?__theme=dark", scale=1)

        btn_change_index.click(
            b_change_index,
            inputs=[index_slider, batchsize_slider],
            outputs=[*g_text_list, *g_audio_list, *g_checkbox_list],
        )

        btn_submit_change.click(
            b_submit_change,
            inputs=[*g_text_list],
            outputs=[index_slider, *g_text_list, *g_audio_list, *g_checkbox_list],
        )

        btn_previous_index.click(
            b_previous_index,
            inputs=[index_slider, batchsize_slider],
            outputs=[index_slider, *g_text_list, *g_audio_list, *g_checkbox_list],
        )

        btn_next_index.click(
            b_next_index,
            inputs=[index_slider, batchsize_slider],
            outputs=[index_slider, *g_text_list, *g_audio_list, *g_checkbox_list],
        )

        btn_delete_audio.click(
            b_delete_audio,
            inputs=[*g_checkbox_list],
            outputs=[index_slider, *g_text_list, *g_audio_list, *g_checkbox_list]
        )

        btn_merge_audio.click(
            b_merge_audio,
            inputs=[interval_slider, *g_checkbox_list],
            outputs=[index_slider, *g_text_list, *g_audio_list, *g_checkbox_list]
        )

        btn_audio_split.click(
            b_audio_split,
            inputs=[splitpoint_slider, *g_checkbox_list],
            outputs=[index_slider, *g_text_list, *g_audio_list, *g_checkbox_list]
        )

        btn_invert_selection.click(
            b_invert_selection,
            inputs=[*g_checkbox_list],
            outputs=[*g_checkbox_list]
        )

        btn_save_json.click(b_save_file)

        demo.load(
            b_change_index,
            inputs=[index_slider, batchsize_slider],
            outputs=[*g_text_list, *g_audio_list, *g_checkbox_list],
        )

    demo.launch(
        server_name="0.0.0.0",
        inbrowser=True,
        quiet=True,
        share=eval(args.is_share),
        server_port=int(args.webui_port_subfix)
    )

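A hedged usage sketch for the tool above; the file name and the sample entry are placeholders, not values from this Space. subfix_webui.py is launched from the command line against a .list annotation file in the wav_path|speaker_name|language|text format that b_load_list() parses:

# Hypothetical example -- paths and text are placeholders.
# Launch afterwards with:  python tools/subfix_webui.py --load_list demo.list --g_batch 10
entry = "|".join(["output/sliced/sample_0.wav", "speaker1", "EN", "Hello there."])
with open("demo.list", "w", encoding="utf-8") as f:
    f.write(entry + "\n")  # one annotation per line, as b_load_list() expects
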
tools/uvr5/bs_roformer/__init__.py
ADDED
File without changes
tools/uvr5/bs_roformer/attend.py
ADDED
@@ -0,0 +1,120 @@
from functools import wraps
from packaging import version
from collections import namedtuple

import torch
from torch import nn, einsum
import torch.nn.functional as F

from einops import rearrange, reduce

# constants

FlashAttentionConfig = namedtuple('FlashAttentionConfig', ['enable_flash', 'enable_math', 'enable_mem_efficient'])

# helpers

def exists(val):
    return val is not None

def default(v, d):
    return v if exists(v) else d

def once(fn):
    called = False
    @wraps(fn)
    def inner(x):
        nonlocal called
        if called:
            return
        called = True
        return fn(x)
    return inner

print_once = once(print)

# main class

class Attend(nn.Module):
    def __init__(
        self,
        dropout = 0.,
        flash = False,
        scale = None
    ):
        super().__init__()
        self.scale = scale
        self.dropout = dropout
        self.attn_dropout = nn.Dropout(dropout)

        self.flash = flash
        assert not (flash and version.parse(torch.__version__) < version.parse('2.0.0')), 'in order to use flash attention, you must be using pytorch 2.0 or above'

        # determine efficient attention configs for cuda and cpu

        self.cpu_config = FlashAttentionConfig(True, True, True)
        self.cuda_config = None

        if not torch.cuda.is_available() or not flash:
            return

        device_properties = torch.cuda.get_device_properties(torch.device('cuda'))

        if device_properties.major == 8 and device_properties.minor == 0:
            print_once('A100 GPU detected, using flash attention if input tensor is on cuda')
            self.cuda_config = FlashAttentionConfig(True, False, False)
        else:
            print_once('Non-A100 GPU detected, using math or mem efficient attention if input tensor is on cuda')
            self.cuda_config = FlashAttentionConfig(False, True, True)

    def flash_attn(self, q, k, v):
        _, heads, q_len, _, k_len, is_cuda, device = *q.shape, k.shape[-2], q.is_cuda, q.device

        if exists(self.scale):
            default_scale = q.shape[-1] ** -0.5
            q = q * (self.scale / default_scale)

        # Check if there is a compatible device for flash attention

        config = self.cuda_config if is_cuda else self.cpu_config

        # pytorch 2.0 flash attn: q, k, v, mask, dropout, softmax_scale

        with torch.backends.cuda.sdp_kernel(**config._asdict()):
            out = F.scaled_dot_product_attention(
                q, k, v,
                dropout_p = self.dropout if self.training else 0.
            )

        return out

    def forward(self, q, k, v):
        """
        einstein notation
        b - batch
        h - heads
        n, i, j - sequence length (base sequence length, source, target)
        d - feature dimension
        """

        q_len, k_len, device = q.shape[-2], k.shape[-2], q.device

        scale = default(self.scale, q.shape[-1] ** -0.5)

        if self.flash:
            return self.flash_attn(q, k, v)

        # similarity

        sim = einsum("b h i d, b h j d -> b h i j", q, k) * scale

        # attention

        attn = sim.softmax(dim=-1)
        attn = self.attn_dropout(attn)

        # aggregate values

        out = einsum("b h i j, b h j d -> b h i d", attn, v)

        return out

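A minimal sketch (not part of the diff) of exercising Attend standalone; the import path is an assumption based on this repo's layout, and flash=False forces the plain einsum path so it runs on CPU without PyTorch 2.0 SDP kernels:

import torch
from bs_roformer.attend import Attend  # assumed import path, as bs_roformer.py uses it

attn = Attend(dropout=0.0, flash=False)  # einsum fallback path
q = torch.randn(1, 8, 128, 64)           # (batch, heads, seq, dim_head)
k = torch.randn(1, 8, 128, 64)
v = torch.randn(1, 8, 128, 64)
print(attn(q, k, v).shape)               # torch.Size([1, 8, 128, 64])
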
tools/uvr5/bs_roformer/bs_roformer.py
ADDED
@@ -0,0 +1,583 @@
from functools import partial

import torch
from torch import nn, einsum, Tensor
from torch.nn import Module, ModuleList
import torch.nn.functional as F

from bs_roformer.attend import Attend

from typing import Tuple, Optional, List, Callable

from rotary_embedding_torch import RotaryEmbedding

from einops import rearrange, pack, unpack
from einops.layers.torch import Rearrange

# helper functions

def exists(val):
    return val is not None


def default(v, d):
    return v if exists(v) else d


def pack_one(t, pattern):
    return pack([t], pattern)


def unpack_one(t, ps, pattern):
    return unpack(t, ps, pattern)[0]


# norm

def l2norm(t):
    return F.normalize(t, dim=-1, p=2)


class RMSNorm(Module):
    def __init__(self, dim):
        super().__init__()
        self.scale = dim ** 0.5
        self.gamma = nn.Parameter(torch.ones(dim))

    def forward(self, x):
        return F.normalize(x, dim=-1) * self.scale * self.gamma


# attention

class FeedForward(Module):
    def __init__(
        self,
        dim,
        mult=4,
        dropout=0.
    ):
        super().__init__()
        dim_inner = int(dim * mult)
        self.net = nn.Sequential(
            RMSNorm(dim),
            nn.Linear(dim, dim_inner),
            nn.GELU(),
            nn.Dropout(dropout),
            nn.Linear(dim_inner, dim),
            nn.Dropout(dropout)
        )

    def forward(self, x):
        return self.net(x)


class Attention(Module):
    def __init__(
        self,
        dim,
        heads=8,
        dim_head=64,
        dropout=0.,
        rotary_embed=None,
        flash=True
    ):
        super().__init__()
        self.heads = heads
        self.scale = dim_head ** -0.5
        dim_inner = heads * dim_head

        self.rotary_embed = rotary_embed

        self.attend = Attend(flash=flash, dropout=dropout)

        self.norm = RMSNorm(dim)
        self.to_qkv = nn.Linear(dim, dim_inner * 3, bias=False)

        self.to_gates = nn.Linear(dim, heads)

        self.to_out = nn.Sequential(
            nn.Linear(dim_inner, dim, bias=False),
            nn.Dropout(dropout)
        )

    def forward(self, x):
        x = self.norm(x)

        q, k, v = rearrange(self.to_qkv(x), 'b n (qkv h d) -> qkv b h n d', qkv=3, h=self.heads)

        if exists(self.rotary_embed):
            q = self.rotary_embed.rotate_queries_or_keys(q)
            k = self.rotary_embed.rotate_queries_or_keys(k)

        out = self.attend(q, k, v)

        gates = self.to_gates(x)
        out = out * rearrange(gates, 'b n h -> b h n 1').sigmoid()

        out = rearrange(out, 'b h n d -> b n (h d)')
        return self.to_out(out)


class LinearAttention(Module):
    """
    this flavor of linear attention proposed in https://arxiv.org/abs/2106.09681 by El-Nouby et al.
    """

    def __init__(
        self,
        *,
        dim,
        dim_head=32,
        heads=8,
        scale=8,
        flash=False,
        dropout=0.
    ):
        super().__init__()
        dim_inner = dim_head * heads
        self.norm = RMSNorm(dim)

        self.to_qkv = nn.Sequential(
            nn.Linear(dim, dim_inner * 3, bias=False),
            Rearrange('b n (qkv h d) -> qkv b h d n', qkv=3, h=heads)
        )

        self.temperature = nn.Parameter(torch.ones(heads, 1, 1))

        self.attend = Attend(
            scale=scale,
            dropout=dropout,
            flash=flash
        )

        self.to_out = nn.Sequential(
            Rearrange('b h d n -> b n (h d)'),
            nn.Linear(dim_inner, dim, bias=False)
        )

    def forward(self, x):
        x = self.norm(x)

        q, k, v = self.to_qkv(x)

        q, k = map(l2norm, (q, k))
        q = q * self.temperature.exp()

        out = self.attend(q, k, v)

        return self.to_out(out)


class Transformer(Module):
    def __init__(
        self,
        *,
        dim,
        depth,
        dim_head=64,
        heads=8,
        attn_dropout=0.,
        ff_dropout=0.,
        ff_mult=4,
        norm_output=True,
        rotary_embed=None,
        flash_attn=True,
        linear_attn=False
    ):
        super().__init__()
        self.layers = ModuleList([])

        for _ in range(depth):
            if linear_attn:
                attn = LinearAttention(dim=dim, dim_head=dim_head, heads=heads, dropout=attn_dropout, flash=flash_attn)
            else:
                attn = Attention(dim=dim, dim_head=dim_head, heads=heads, dropout=attn_dropout,
                                 rotary_embed=rotary_embed, flash=flash_attn)

            self.layers.append(ModuleList([
                attn,
                FeedForward(dim=dim, mult=ff_mult, dropout=ff_dropout)
            ]))

        self.norm = RMSNorm(dim) if norm_output else nn.Identity()

    def forward(self, x):

        for attn, ff in self.layers:
            x = attn(x) + x
            x = ff(x) + x

        return self.norm(x)


# bandsplit module

class BandSplit(Module):
    def __init__(
        self,
        dim,
        dim_inputs: Tuple[int, ...]
    ):
        super().__init__()
        self.dim_inputs = dim_inputs
        self.to_features = ModuleList([])

        for dim_in in dim_inputs:
            net = nn.Sequential(
                RMSNorm(dim_in),
                nn.Linear(dim_in, dim)
            )

            self.to_features.append(net)

    def forward(self, x):
        x = x.split(self.dim_inputs, dim=-1)

        outs = []
        for split_input, to_feature in zip(x, self.to_features):
            split_output = to_feature(split_input)
            outs.append(split_output)

        return torch.stack(outs, dim=-2)


def MLP(
    dim_in,
    dim_out,
    dim_hidden=None,
    depth=1,
    activation=nn.Tanh
):
    dim_hidden = default(dim_hidden, dim_in)

    net = []
    dims = (dim_in, *((dim_hidden,) * (depth - 1)), dim_out)

    for ind, (layer_dim_in, layer_dim_out) in enumerate(zip(dims[:-1], dims[1:])):
        is_last = ind == (len(dims) - 2)

        net.append(nn.Linear(layer_dim_in, layer_dim_out))

        if is_last:
            continue

        net.append(activation())

    return nn.Sequential(*net)


class MaskEstimator(Module):
    def __init__(
        self,
        dim,
        dim_inputs: Tuple[int, ...],
        depth,
        mlp_expansion_factor=4
    ):
        super().__init__()
        self.dim_inputs = dim_inputs
        self.to_freqs = ModuleList([])
        dim_hidden = dim * mlp_expansion_factor

        for dim_in in dim_inputs:
            mlp = nn.Sequential(
                MLP(dim, dim_in * 2, dim_hidden=dim_hidden, depth=depth),
                nn.GLU(dim=-1)
            )

            self.to_freqs.append(mlp)

    def forward(self, x):
        x = x.unbind(dim=-2)

        outs = []

        for band_features, mlp in zip(x, self.to_freqs):
            freq_out = mlp(band_features)
            outs.append(freq_out)

        return torch.cat(outs, dim=-1)


# main class

DEFAULT_FREQS_PER_BANDS = (
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    12, 12, 12, 12, 12, 12, 12, 12,
    24, 24, 24, 24, 24, 24, 24, 24,
    48, 48, 48, 48, 48, 48, 48, 48,
    128, 129,
)


class BSRoformer(Module):

    def __init__(
        self,
        dim,
        *,
        depth,
        stereo=False,
        num_stems=1,
        time_transformer_depth=2,
        freq_transformer_depth=2,
        linear_transformer_depth=0,
        freqs_per_bands: Tuple[int, ...] = DEFAULT_FREQS_PER_BANDS,
        # in the paper, they divide into ~60 bands, test with 1 for starters
        dim_head=64,
        heads=8,
        attn_dropout=0.,
        ff_dropout=0.,
        flash_attn=True,
        dim_freqs_in=1025,
        stft_n_fft=2048,
        stft_hop_length=512,
        # 10ms at 44100Hz, from sections 4.1, 4.4 in the paper - @faroit recommends // 2 or // 4 for better reconstruction
        stft_win_length=2048,
        stft_normalized=False,
        stft_window_fn: Optional[Callable] = None,
        mask_estimator_depth=2,
        multi_stft_resolution_loss_weight=1.,
        multi_stft_resolutions_window_sizes: Tuple[int, ...] = (4096, 2048, 1024, 512, 256),
        multi_stft_hop_size=147,
        multi_stft_normalized=False,
        multi_stft_window_fn: Callable = torch.hann_window
    ):
        super().__init__()

        self.stereo = stereo
        self.audio_channels = 2 if stereo else 1
        self.num_stems = num_stems

        self.layers = ModuleList([])

        transformer_kwargs = dict(
            dim=dim,
            heads=heads,
            dim_head=dim_head,
            attn_dropout=attn_dropout,
            ff_dropout=ff_dropout,
            flash_attn=flash_attn,
            norm_output=False
        )

        time_rotary_embed = RotaryEmbedding(dim=dim_head)
        freq_rotary_embed = RotaryEmbedding(dim=dim_head)

        for _ in range(depth):
            tran_modules = []
            if linear_transformer_depth > 0:
                tran_modules.append(Transformer(depth=linear_transformer_depth, linear_attn=True, **transformer_kwargs))
            tran_modules.append(
                Transformer(depth=time_transformer_depth, rotary_embed=time_rotary_embed, **transformer_kwargs)
            )
            tran_modules.append(
                Transformer(depth=freq_transformer_depth, rotary_embed=freq_rotary_embed, **transformer_kwargs)
            )
            self.layers.append(nn.ModuleList(tran_modules))

        self.final_norm = RMSNorm(dim)

        self.stft_kwargs = dict(
            n_fft=stft_n_fft,
            hop_length=stft_hop_length,
            win_length=stft_win_length,
            normalized=stft_normalized
        )

        self.stft_window_fn = partial(default(stft_window_fn, torch.hann_window), stft_win_length)

        freqs = torch.stft(torch.randn(1, 4096), **self.stft_kwargs, return_complex=True).shape[1]

        assert len(freqs_per_bands) > 1
        assert sum(freqs_per_bands) == freqs, f'the number of freqs in the bands must equal {freqs} based on the STFT settings, but got {sum(freqs_per_bands)}'

        freqs_per_bands_with_complex = tuple(2 * f * self.audio_channels for f in freqs_per_bands)

        self.band_split = BandSplit(
            dim=dim,
            dim_inputs=freqs_per_bands_with_complex
        )

        self.mask_estimators = nn.ModuleList([])

        for _ in range(num_stems):
            mask_estimator = MaskEstimator(
                dim=dim,
                dim_inputs=freqs_per_bands_with_complex,
                depth=mask_estimator_depth
            )

            self.mask_estimators.append(mask_estimator)

        # for the multi-resolution stft loss

        self.multi_stft_resolution_loss_weight = multi_stft_resolution_loss_weight
        self.multi_stft_resolutions_window_sizes = multi_stft_resolutions_window_sizes
        self.multi_stft_n_fft = stft_n_fft
        self.multi_stft_window_fn = multi_stft_window_fn

        self.multi_stft_kwargs = dict(
            hop_length=multi_stft_hop_size,
            normalized=multi_stft_normalized
        )

    def forward(
        self,
        raw_audio,
        target=None,
        return_loss_breakdown=False
    ):
        """
        einops

        b - batch
        f - freq
        t - time
        s - audio channel (1 for mono, 2 for stereo)
        n - number of 'stems'
        c - complex (2)
        d - feature dimension
        """

        device = raw_audio.device

        if raw_audio.ndim == 2:
            raw_audio = rearrange(raw_audio, 'b t -> b 1 t')

        channels = raw_audio.shape[1]
        assert (not self.stereo and channels == 1) or (
            self.stereo and channels == 2), 'stereo needs to be set to True if passing in audio signal that is stereo (channel dimension of 2). also need to be False if mono (channel dimension of 1)'

        # to stft

        raw_audio, batch_audio_channel_packed_shape = pack_one(raw_audio, '* t')

        stft_window = self.stft_window_fn(device=device)

        stft_repr = torch.stft(raw_audio, **self.stft_kwargs, window=stft_window, return_complex=True)
        stft_repr = torch.view_as_real(stft_repr)

        stft_repr = unpack_one(stft_repr, batch_audio_channel_packed_shape, '* f t c')
        stft_repr = rearrange(stft_repr,
                              'b s f t c -> b (f s) t c')  # merge stereo / mono into the frequency, with frequency leading dimension, for band splitting

        x = rearrange(stft_repr, 'b f t c -> b t (f c)')
        x = self.band_split(x)

        # axial / hierarchical attention

        for transformer_block in self.layers:

            if len(transformer_block) == 3:
                linear_transformer, time_transformer, freq_transformer = transformer_block

                x, ft_ps = pack([x], 'b * d')
                x = linear_transformer(x)
                x, = unpack(x, ft_ps, 'b * d')
            else:
                time_transformer, freq_transformer = transformer_block

            x = rearrange(x, 'b t f d -> b f t d')
            x, ps = pack([x], '* t d')

            x = time_transformer(x)
            x, = unpack(x, ps, '* t d')
            x = rearrange(x, 'b f t d -> b t f d')
            x, ps = pack([x], '* f d')

            x = freq_transformer(x)

            x, = unpack(x, ps, '* f d')

        x = self.final_norm(x)

        num_stems = len(self.mask_estimators)
        mask = torch.stack([fn(x) for fn in self.mask_estimators], dim=1)
        mask = rearrange(mask, 'b n t (f c) -> b n f t c', c=2)

        # modulate frequency representation

        stft_repr = rearrange(stft_repr, 'b f t c -> b 1 f t c')

        # complex number multiplication

        stft_repr = torch.view_as_complex(stft_repr)
        mask = torch.view_as_complex(mask)

        stft_repr = stft_repr * mask

        # istft

        stft_repr = rearrange(stft_repr, 'b n (f s) t -> (b n s) f t', s=self.audio_channels)

        recon_audio = torch.istft(stft_repr, **self.stft_kwargs, window=stft_window, return_complex=False)

        recon_audio = rearrange(recon_audio, '(b n s) t -> b n s t', s=self.audio_channels, n=num_stems)

        if num_stems == 1:
            recon_audio = rearrange(recon_audio, 'b 1 s t -> b s t')

        # if a target is passed in, calculate loss for learning

        if not exists(target):
            return recon_audio

        if self.num_stems > 1:
            assert target.ndim == 4 and target.shape[1] == self.num_stems

        if target.ndim == 2:
            target = rearrange(target, '... t -> ... 1 t')

        target = target[..., :recon_audio.shape[-1]]  # protect against lost length on istft

        loss = F.l1_loss(recon_audio, target)

        multi_stft_resolution_loss = 0.

        for window_size in self.multi_stft_resolutions_window_sizes:
            res_stft_kwargs = dict(
                n_fft=max(window_size, self.multi_stft_n_fft),  # not sure what n_fft is across multi resolution stft
                win_length=window_size,
                return_complex=True,
                window=self.multi_stft_window_fn(window_size, device=device),
                **self.multi_stft_kwargs,
            )

            recon_Y = torch.stft(rearrange(recon_audio, '... s t -> (... s) t'), **res_stft_kwargs)
            target_Y = torch.stft(rearrange(target, '... s t -> (... s) t'), **res_stft_kwargs)

            multi_stft_resolution_loss = multi_stft_resolution_loss + F.l1_loss(recon_Y, target_Y)

        weighted_multi_resolution_loss = multi_stft_resolution_loss * self.multi_stft_resolution_loss_weight

        total_loss = loss + weighted_multi_resolution_loss

        if not return_loss_breakdown:
            return total_loss

        return total_loss, (loss, multi_stft_resolution_loss)

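A hedged smoke test for BSRoformer. The hyperparameters here are deliberately tiny illustrative choices, not the values this Space ships with (bsroformer.py below uses dim=512, depth=12); the default freqs_per_bands sums to 1025, matching the 2048-point STFT (n_fft // 2 + 1 bins), so the band assert passes:

import torch
from bs_roformer.bs_roformer import BSRoformer  # same import path bsroformer.py uses

model = BSRoformer(dim=32, depth=1, time_transformer_depth=1,
                   freq_transformer_depth=1, flash_attn=False)  # CPU-friendly sketch
audio = torch.randn(1, 44100)   # (batch, time); mono because stereo=False
with torch.no_grad():
    recon = model(audio)        # no target passed -> masked/reconstructed audio is returned
print(recon.shape)              # (batch, channels, time)
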
tools/uvr5/bsroformer.py
ADDED
@@ -0,0 +1,216 @@
# This code is modified from https://github.com/ZFTurbo/
import os
import warnings

import librosa
import numpy as np
import soundfile as sf
import torch
import torch.nn as nn
from tqdm import tqdm

warnings.filterwarnings("ignore")
from bs_roformer.bs_roformer import BSRoformer


class BsRoformer_Loader:
    def get_model_from_config(self):
        config = {
            "attn_dropout": 0.1,
            "depth": 12,
            "dim": 512,
            "dim_freqs_in": 1025,
            "dim_head": 64,
            "ff_dropout": 0.1,
            "flash_attn": True,
            "freq_transformer_depth": 1,
            "freqs_per_bands": (2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 12, 12, 12, 12, 12, 12, 12, 12, 24, 24, 24, 24, 24, 24, 24, 24, 48, 48, 48, 48, 48, 48, 48, 48, 128, 129),
            "heads": 8,
            "linear_transformer_depth": 0,
            "mask_estimator_depth": 2,
            "multi_stft_hop_size": 147,
            "multi_stft_normalized": False,
            "multi_stft_resolution_loss_weight": 1.0,
            "multi_stft_resolutions_window_sizes": (4096, 2048, 1024, 512, 256),
            "num_stems": 1,
            "stereo": True,
            "stft_hop_length": 441,
            "stft_n_fft": 2048,
            "stft_normalized": False,
            "stft_win_length": 2048,
            "time_transformer_depth": 1,
        }

        model = BSRoformer(
            **dict(config)
        )

        return model

    def demix_track(self, model, mix, device):
        C = 352800          # chunk length in samples
        N = 1               # num_overlap
        fade_size = C // 10
        step = int(C // N)
        border = C - step
        batch_size = 4

        length_init = mix.shape[-1]

        progress_bar = tqdm(total=length_init // step + 1)
        progress_bar.set_description("Processing")

        # Pad at the beginning and the end so the sliding window scores the edges better
        if length_init > 2 * border and (border > 0):
            mix = nn.functional.pad(mix, (border, border), mode='reflect')

        # Prepare window arrays once for speed; this trick repairs clicks at segment edges
        window_size = C
        fadein = torch.linspace(0, 1, fade_size)
        fadeout = torch.linspace(1, 0, fade_size)
        window_start = torch.ones(window_size)
        window_middle = torch.ones(window_size)
        window_finish = torch.ones(window_size)
        window_start[-fade_size:] *= fadeout  # First audio chunk, no fadein
        window_finish[:fade_size] *= fadein   # Last audio chunk, no fadeout
        window_middle[-fade_size:] *= fadeout
        window_middle[:fade_size] *= fadein

        with torch.amp.autocast('cuda'):
            with torch.inference_mode():
                req_shape = (1,) + tuple(mix.shape)

                result = torch.zeros(req_shape, dtype=torch.float32)
                counter = torch.zeros(req_shape, dtype=torch.float32)
                i = 0
                batch_data = []
                batch_locations = []
                while i < mix.shape[1]:
                    part = mix[:, i:i + C].to(device)
                    length = part.shape[-1]
                    if length < C:
                        if length > C // 2 + 1:
                            part = nn.functional.pad(input=part, pad=(0, C - length), mode='reflect')
                        else:
                            part = nn.functional.pad(input=part, pad=(0, C - length, 0, 0), mode='constant', value=0)
                    if self.is_half == True:
                        part = part.half()
                    batch_data.append(part)
                    batch_locations.append((i, length))
                    i += step
                    progress_bar.update(1)

                    if len(batch_data) >= batch_size or (i >= mix.shape[1]):
                        arr = torch.stack(batch_data, dim=0)
                        x = model(arr)

                        window = window_middle
                        if i - step == 0:  # First audio chunk, no fadein
                            window = window_start
                        elif i >= mix.shape[1]:  # Last audio chunk, no fadeout
                            window = window_finish

                        for j in range(len(batch_locations)):
                            start, l = batch_locations[j]
                            result[..., start:start + l] += x[j][..., :l].cpu() * window[..., :l]
                            counter[..., start:start + l] += window[..., :l]

                        batch_data = []
                        batch_locations = []

                estimated_sources = result / counter
                estimated_sources = estimated_sources.cpu().numpy()
                np.nan_to_num(estimated_sources, copy=False, nan=0.0)

                if length_init > 2 * border and (border > 0):
                    # Remove pad
                    estimated_sources = estimated_sources[..., border:-border]

        progress_bar.close()

        return {k: v for k, v in zip(['vocals', 'other'], estimated_sources)}

    def run_folder(self, input, vocal_root, others_root, format):
        self.model.eval()
        path = input

        if not os.path.isdir(vocal_root):
            os.mkdir(vocal_root)

        if not os.path.isdir(others_root):
            os.mkdir(others_root)

        try:
            mix, sr = librosa.load(path, sr=44100, mono=False)
        except Exception as e:
            print('Cannot read track: {}'.format(path))
            print('Error message: {}'.format(str(e)))
            return

        # Convert mono to stereo if needed
        if len(mix.shape) == 1:
            mix = np.stack([mix, mix], axis=0)

        mix_orig = mix.copy()

        mixture = torch.tensor(mix, dtype=torch.float32)
        res = self.demix_track(self.model, mixture, self.device)

        estimates = res['vocals'].T

        if format in ["wav", "flac"]:
            sf.write("{}/{}_{}.{}".format(vocal_root, os.path.basename(path)[:-4], 'vocals', format), estimates, sr)
            sf.write("{}/{}_{}.{}".format(others_root, os.path.basename(path)[:-4], 'instrumental', format), mix_orig.T - estimates, sr)
        else:
            # write wav first, then transcode with ffmpeg and drop the intermediate file
            path_vocal = "%s/%s_vocals.wav" % (vocal_root, os.path.basename(path)[:-4])
            path_other = "%s/%s_instrumental.wav" % (others_root, os.path.basename(path)[:-4])
            sf.write(path_vocal, estimates, sr)
            sf.write(path_other, mix_orig.T - estimates, sr)
            opt_path_vocal = path_vocal[:-4] + ".%s" % format
            opt_path_other = path_other[:-4] + ".%s" % format
            if os.path.exists(path_vocal):
                os.system(
                    "ffmpeg -i '%s' -vn '%s' -q:a 2 -y" % (path_vocal, opt_path_vocal)
                )
                if os.path.exists(opt_path_vocal):
                    try:
                        os.remove(path_vocal)
                    except:
                        pass
            if os.path.exists(path_other):
                os.system(
                    "ffmpeg -i '%s' -vn '%s' -q:a 2 -y" % (path_other, opt_path_other)
                )
                if os.path.exists(opt_path_other):
                    try:
                        os.remove(path_other)
                    except:
                        pass

    def __init__(self, model_path, device, is_half):
        self.device = device
        self.extract_instrumental = True

        model = self.get_model_from_config()
        state_dict = torch.load(model_path, map_location="cpu")
        model.load_state_dict(state_dict)
        self.is_half = is_half
        if is_half == False:
            self.model = model.to(device)
        else:
            self.model = model.half().to(device)

    def _path_audio_(self, input, others_root, vocal_root, format, is_hp3=False):
        self.run_folder(input, vocal_root, others_root, format)

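A hypothetical end-to-end call for the loader above; the checkpoint path and audio locations are placeholders, not files from this Space. Note the argument order of _path_audio_: others_root (instrumental output) comes before vocal_root:

import torch
from bsroformer import BsRoformer_Loader  # assumed import, with tools/uvr5 on sys.path

device = "cuda" if torch.cuda.is_available() else "cpu"
loader = BsRoformer_Loader("uvr5_weights/model_bs_roformer.ckpt",  # hypothetical checkpoint
                           device, is_half=False)
loader._path_audio_("input/song.wav",       # hypothetical mixture
                    "output/instrumental",  # others_root
                    "output/vocal",         # vocal_root
                    "wav")
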
tools/uvr5/lib/lib_v5/dataset.py
ADDED
@@ -0,0 +1,183 @@
import os
import random

import numpy as np
import torch
import torch.utils.data
from tqdm import tqdm

from . import spec_utils


class VocalRemoverValidationSet(torch.utils.data.Dataset):
    def __init__(self, patch_list):
        self.patch_list = patch_list

    def __len__(self):
        return len(self.patch_list)

    def __getitem__(self, idx):
        path = self.patch_list[idx]
        data = np.load(path)

        X, y = data["X"], data["y"]

        X_mag = np.abs(X)
        y_mag = np.abs(y)

        return X_mag, y_mag


def make_pair(mix_dir, inst_dir):
    input_exts = [".wav", ".m4a", ".mp3", ".mp4", ".flac"]

    X_list = sorted(
        [
            os.path.join(mix_dir, fname)
            for fname in os.listdir(mix_dir)
            if os.path.splitext(fname)[1] in input_exts
        ]
    )
    y_list = sorted(
        [
            os.path.join(inst_dir, fname)
            for fname in os.listdir(inst_dir)
            if os.path.splitext(fname)[1] in input_exts
        ]
    )

    filelist = list(zip(X_list, y_list))

    return filelist


def train_val_split(dataset_dir, split_mode, val_rate, val_filelist):
    if split_mode == "random":
        filelist = make_pair(
            os.path.join(dataset_dir, "mixtures"),
            os.path.join(dataset_dir, "instruments"),
        )

        random.shuffle(filelist)

        if len(val_filelist) == 0:
            val_size = int(len(filelist) * val_rate)
            train_filelist = filelist[:-val_size]
            val_filelist = filelist[-val_size:]
        else:
            train_filelist = [
                pair for pair in filelist if list(pair) not in val_filelist
            ]
    elif split_mode == "subdirs":
        if len(val_filelist) != 0:
            raise ValueError(
                "The `val_filelist` option is not available in `subdirs` mode"
            )

        train_filelist = make_pair(
            os.path.join(dataset_dir, "training/mixtures"),
            os.path.join(dataset_dir, "training/instruments"),
        )

        val_filelist = make_pair(
            os.path.join(dataset_dir, "validation/mixtures"),
            os.path.join(dataset_dir, "validation/instruments"),
        )

    return train_filelist, val_filelist


def augment(X, y, reduction_rate, reduction_mask, mixup_rate, mixup_alpha):
    perm = np.random.permutation(len(X))
    for i, idx in enumerate(tqdm(perm)):
        if np.random.uniform() < reduction_rate:
            y[idx] = spec_utils.reduce_vocal_aggressively(
                X[idx], y[idx], reduction_mask
            )

        if np.random.uniform() < 0.5:
            # swap channel
            X[idx] = X[idx, ::-1]
            y[idx] = y[idx, ::-1]
        if np.random.uniform() < 0.02:
            # mono
            X[idx] = X[idx].mean(axis=0, keepdims=True)
            y[idx] = y[idx].mean(axis=0, keepdims=True)
        if np.random.uniform() < 0.02:
            # inst
            X[idx] = y[idx]

        if np.random.uniform() < mixup_rate and i < len(perm) - 1:
            lam = np.random.beta(mixup_alpha, mixup_alpha)
            X[idx] = lam * X[idx] + (1 - lam) * X[perm[i + 1]]
            y[idx] = lam * y[idx] + (1 - lam) * y[perm[i + 1]]

    return X, y


def make_padding(width, cropsize, offset):
    left = offset
    roi_size = cropsize - left * 2
    if roi_size == 0:
        roi_size = cropsize
    right = roi_size - (width % roi_size) + left

    return left, right, roi_size


def make_training_set(filelist, cropsize, patches, sr, hop_length, n_fft, offset):
    len_dataset = patches * len(filelist)

    X_dataset = np.zeros((len_dataset, 2, n_fft // 2 + 1, cropsize), dtype=np.complex64)
    y_dataset = np.zeros((len_dataset, 2, n_fft // 2 + 1, cropsize), dtype=np.complex64)

    for i, (X_path, y_path) in enumerate(tqdm(filelist)):
        X, y = spec_utils.cache_or_load(X_path, y_path, sr, hop_length, n_fft)
        coef = np.max([np.abs(X).max(), np.abs(y).max()])
        X, y = X / coef, y / coef

        l, r, roi_size = make_padding(X.shape[2], cropsize, offset)
        X_pad = np.pad(X, ((0, 0), (0, 0), (l, r)), mode="constant")
        y_pad = np.pad(y, ((0, 0), (0, 0), (l, r)), mode="constant")

        starts = np.random.randint(0, X_pad.shape[2] - cropsize, patches)
        ends = starts + cropsize
        for j in range(patches):
            idx = i * patches + j
            X_dataset[idx] = X_pad[:, :, starts[j] : ends[j]]
            y_dataset[idx] = y_pad[:, :, starts[j] : ends[j]]

    return X_dataset, y_dataset


def make_validation_set(filelist, cropsize, sr, hop_length, n_fft, offset):
    patch_list = []
    patch_dir = "cs{}_sr{}_hl{}_nf{}_of{}".format(
        cropsize, sr, hop_length, n_fft, offset
    )
    os.makedirs(patch_dir, exist_ok=True)

    for i, (X_path, y_path) in enumerate(tqdm(filelist)):
        basename = os.path.splitext(os.path.basename(X_path))[0]

        X, y = spec_utils.cache_or_load(X_path, y_path, sr, hop_length, n_fft)
        coef = np.max([np.abs(X).max(), np.abs(y).max()])
        X, y = X / coef, y / coef

        l, r, roi_size = make_padding(X.shape[2], cropsize, offset)
        X_pad = np.pad(X, ((0, 0), (0, 0), (l, r)), mode="constant")
        y_pad = np.pad(y, ((0, 0), (0, 0), (l, r)), mode="constant")

        len_dataset = int(np.ceil(X.shape[2] / roi_size))
        for j in range(len_dataset):
            outpath = os.path.join(patch_dir, "{}_p{}.npz".format(basename, j))
            start = j * roi_size
            if not os.path.exists(outpath):
                np.savez(
                    outpath,
                    X=X_pad[:, :, start : start + cropsize],
                    y=y_pad[:, :, start : start + cropsize],
                )
            patch_list.append(outpath)

    return VocalRemoverValidationSet(patch_list)

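A quick worked example of make_padding() from above; the numbers are illustrative. It restates the function's arithmetic inline, so it runs standalone:

# With width=1000, cropsize=256, offset=32:
width, cropsize, offset = 1000, 256, 32
left = offset                                 # 32
roi_size = cropsize - left * 2                # 192 usable frames per crop
right = roi_size - (width % roi_size) + left  # 192 - 40 + 32 = 184
print(left, right, roi_size)                  # 32 184 192
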
tools/uvr5/lib/lib_v5/layers.py
ADDED
@@ -0,0 +1,118 @@
import torch
import torch.nn.functional as F
from torch import nn

from . import spec_utils


class Conv2DBNActiv(nn.Module):
    def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU):
        super(Conv2DBNActiv, self).__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(
                nin,
                nout,
                kernel_size=ksize,
                stride=stride,
                padding=pad,
                dilation=dilation,
                bias=False,
            ),
            nn.BatchNorm2d(nout),
            activ(),
        )

    def __call__(self, x):
        return self.conv(x)


class SeperableConv2DBNActiv(nn.Module):
    def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU):
        super(SeperableConv2DBNActiv, self).__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(
                nin,
                nin,
                kernel_size=ksize,
                stride=stride,
                padding=pad,
                dilation=dilation,
                groups=nin,
                bias=False,
            ),
            nn.Conv2d(nin, nout, kernel_size=1, bias=False),
            nn.BatchNorm2d(nout),
            activ(),
        )

    def __call__(self, x):
        return self.conv(x)


class Encoder(nn.Module):
    def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU):
        super(Encoder, self).__init__()
        self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ)
        self.conv2 = Conv2DBNActiv(nout, nout, ksize, stride, pad, activ=activ)

    def __call__(self, x):
        skip = self.conv1(x)
        h = self.conv2(skip)

        return h, skip


class Decoder(nn.Module):
    def __init__(
        self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False
    ):
        super(Decoder, self).__init__()
        self.conv = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ)
        self.dropout = nn.Dropout2d(0.1) if dropout else None

    def __call__(self, x, skip=None):
        x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True)
        if skip is not None:
            skip = spec_utils.crop_center(skip, x)
            x = torch.cat([x, skip], dim=1)
        h = self.conv(x)

        if self.dropout is not None:
            h = self.dropout(h)

        return h


class ASPPModule(nn.Module):
    def __init__(self, nin, nout, dilations=(4, 8, 16), activ=nn.ReLU):
        super(ASPPModule, self).__init__()
        self.conv1 = nn.Sequential(
            nn.AdaptiveAvgPool2d((1, None)),
            Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ),
        )
        self.conv2 = Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ)
        self.conv3 = SeperableConv2DBNActiv(
            nin, nin, 3, 1, dilations[0], dilations[0], activ=activ
        )
        self.conv4 = SeperableConv2DBNActiv(
            nin, nin, 3, 1, dilations[1], dilations[1], activ=activ
        )
        self.conv5 = SeperableConv2DBNActiv(
            nin, nin, 3, 1, dilations[2], dilations[2], activ=activ
        )
        self.bottleneck = nn.Sequential(
            Conv2DBNActiv(nin * 5, nout, 1, 1, 0, activ=activ), nn.Dropout2d(0.1)
        )

    def forward(self, x):
        _, _, h, w = x.size()
        feat1 = F.interpolate(
            self.conv1(x), size=(h, w), mode="bilinear", align_corners=True
|
111 |
+
)
|
112 |
+
feat2 = self.conv2(x)
|
113 |
+
feat3 = self.conv3(x)
|
114 |
+
feat4 = self.conv4(x)
|
115 |
+
feat5 = self.conv5(x)
|
116 |
+
out = torch.cat((feat1, feat2, feat3, feat4, feat5), dim=1)
|
117 |
+
bottle = self.bottleneck(out)
|
118 |
+
return bottle
|
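A hedged wiring sketch (not part of the diff) showing how these blocks compose; the channel sizes and the (bins, frames) input shape are illustrative, not those of any shipped model.

import torch

enc = Encoder(2, 32, ksize=3, stride=2, pad=1)   # conv2 downsamples by 2
aspp = ASPPModule(32, 64)                        # 5-branch bottleneck
dec = Decoder(64 + 32, 32)                       # upsampled features + skip

x = torch.randn(2, 2, 512, 256)                  # (N, channels, bins, frames)
h, skip = enc(x)                                 # h: (2, 32, 256, 128)
h = aspp(h)                                      # (2, 64, 256, 128)
h = dec(h, skip)                                 # (2, 32, 512, 256)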
tools/uvr5/lib/lib_v5/layers_123812KB.py
ADDED
@@ -0,0 +1,118 @@
import torch
import torch.nn.functional as F
from torch import nn

from . import spec_utils


class Conv2DBNActiv(nn.Module):
    def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU):
        super(Conv2DBNActiv, self).__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(
                nin,
                nout,
                kernel_size=ksize,
                stride=stride,
                padding=pad,
                dilation=dilation,
                bias=False,
            ),
            nn.BatchNorm2d(nout),
            activ(),
        )

    def __call__(self, x):
        return self.conv(x)


class SeperableConv2DBNActiv(nn.Module):
    def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU):
        super(SeperableConv2DBNActiv, self).__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(
                nin,
                nin,
                kernel_size=ksize,
                stride=stride,
                padding=pad,
                dilation=dilation,
                groups=nin,
                bias=False,
            ),
            nn.Conv2d(nin, nout, kernel_size=1, bias=False),
            nn.BatchNorm2d(nout),
            activ(),
        )

    def __call__(self, x):
        return self.conv(x)


class Encoder(nn.Module):
    def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU):
        super(Encoder, self).__init__()
        self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ)
        self.conv2 = Conv2DBNActiv(nout, nout, ksize, stride, pad, activ=activ)

    def __call__(self, x):
        skip = self.conv1(x)
        h = self.conv2(skip)

        return h, skip


class Decoder(nn.Module):
    def __init__(
        self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False
    ):
        super(Decoder, self).__init__()
        self.conv = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ)
        self.dropout = nn.Dropout2d(0.1) if dropout else None

    def __call__(self, x, skip=None):
        x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True)
        if skip is not None:
            skip = spec_utils.crop_center(skip, x)
            x = torch.cat([x, skip], dim=1)
        h = self.conv(x)

        if self.dropout is not None:
            h = self.dropout(h)

        return h


class ASPPModule(nn.Module):
    def __init__(self, nin, nout, dilations=(4, 8, 16), activ=nn.ReLU):
        super(ASPPModule, self).__init__()
        self.conv1 = nn.Sequential(
            nn.AdaptiveAvgPool2d((1, None)),
            Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ),
        )
        self.conv2 = Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ)
        self.conv3 = SeperableConv2DBNActiv(
            nin, nin, 3, 1, dilations[0], dilations[0], activ=activ
        )
        self.conv4 = SeperableConv2DBNActiv(
            nin, nin, 3, 1, dilations[1], dilations[1], activ=activ
        )
        self.conv5 = SeperableConv2DBNActiv(
            nin, nin, 3, 1, dilations[2], dilations[2], activ=activ
        )
        self.bottleneck = nn.Sequential(
            Conv2DBNActiv(nin * 5, nout, 1, 1, 0, activ=activ), nn.Dropout2d(0.1)
        )

    def forward(self, x):
        _, _, h, w = x.size()
        feat1 = F.interpolate(
            self.conv1(x), size=(h, w), mode="bilinear", align_corners=True
        )
        feat2 = self.conv2(x)
        feat3 = self.conv3(x)
        feat4 = self.conv4(x)
        feat5 = self.conv5(x)
        out = torch.cat((feat1, feat2, feat3, feat4, feat5), dim=1)
        bottle = self.bottleneck(out)
        return bottle
tools/uvr5/lib/lib_v5/layers_123821KB.py
ADDED
@@ -0,0 +1,118 @@
import torch
import torch.nn.functional as F
from torch import nn

from . import spec_utils


class Conv2DBNActiv(nn.Module):
    def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU):
        super(Conv2DBNActiv, self).__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(
                nin,
                nout,
                kernel_size=ksize,
                stride=stride,
                padding=pad,
                dilation=dilation,
                bias=False,
            ),
            nn.BatchNorm2d(nout),
            activ(),
        )

    def __call__(self, x):
        return self.conv(x)


class SeperableConv2DBNActiv(nn.Module):
    def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU):
        super(SeperableConv2DBNActiv, self).__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(
                nin,
                nin,
                kernel_size=ksize,
                stride=stride,
                padding=pad,
                dilation=dilation,
                groups=nin,
                bias=False,
            ),
            nn.Conv2d(nin, nout, kernel_size=1, bias=False),
            nn.BatchNorm2d(nout),
            activ(),
        )

    def __call__(self, x):
        return self.conv(x)


class Encoder(nn.Module):
    def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU):
        super(Encoder, self).__init__()
        self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ)
        self.conv2 = Conv2DBNActiv(nout, nout, ksize, stride, pad, activ=activ)

    def __call__(self, x):
        skip = self.conv1(x)
        h = self.conv2(skip)

        return h, skip


class Decoder(nn.Module):
    def __init__(
        self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False
    ):
        super(Decoder, self).__init__()
        self.conv = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ)
        self.dropout = nn.Dropout2d(0.1) if dropout else None

    def __call__(self, x, skip=None):
        x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True)
        if skip is not None:
            skip = spec_utils.crop_center(skip, x)
            x = torch.cat([x, skip], dim=1)
        h = self.conv(x)

        if self.dropout is not None:
            h = self.dropout(h)

        return h


class ASPPModule(nn.Module):
    def __init__(self, nin, nout, dilations=(4, 8, 16), activ=nn.ReLU):
        super(ASPPModule, self).__init__()
        self.conv1 = nn.Sequential(
            nn.AdaptiveAvgPool2d((1, None)),
            Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ),
        )
        self.conv2 = Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ)
        self.conv3 = SeperableConv2DBNActiv(
            nin, nin, 3, 1, dilations[0], dilations[0], activ=activ
        )
        self.conv4 = SeperableConv2DBNActiv(
            nin, nin, 3, 1, dilations[1], dilations[1], activ=activ
        )
        self.conv5 = SeperableConv2DBNActiv(
            nin, nin, 3, 1, dilations[2], dilations[2], activ=activ
        )
        self.bottleneck = nn.Sequential(
            Conv2DBNActiv(nin * 5, nout, 1, 1, 0, activ=activ), nn.Dropout2d(0.1)
        )

    def forward(self, x):
        _, _, h, w = x.size()
        feat1 = F.interpolate(
            self.conv1(x), size=(h, w), mode="bilinear", align_corners=True
        )
        feat2 = self.conv2(x)
        feat3 = self.conv3(x)
        feat4 = self.conv4(x)
        feat5 = self.conv5(x)
        out = torch.cat((feat1, feat2, feat3, feat4, feat5), dim=1)
        bottle = self.bottleneck(out)
        return bottle
tools/uvr5/lib/lib_v5/layers_33966KB.py
ADDED
@@ -0,0 +1,126 @@
import torch
import torch.nn.functional as F
from torch import nn

from . import spec_utils


class Conv2DBNActiv(nn.Module):
    def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU):
        super(Conv2DBNActiv, self).__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(
                nin,
                nout,
                kernel_size=ksize,
                stride=stride,
                padding=pad,
                dilation=dilation,
                bias=False,
            ),
            nn.BatchNorm2d(nout),
            activ(),
        )

    def __call__(self, x):
        return self.conv(x)


class SeperableConv2DBNActiv(nn.Module):
    def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU):
        super(SeperableConv2DBNActiv, self).__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(
                nin,
                nin,
                kernel_size=ksize,
                stride=stride,
                padding=pad,
                dilation=dilation,
                groups=nin,
                bias=False,
            ),
            nn.Conv2d(nin, nout, kernel_size=1, bias=False),
            nn.BatchNorm2d(nout),
            activ(),
        )

    def __call__(self, x):
        return self.conv(x)


class Encoder(nn.Module):
    def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU):
        super(Encoder, self).__init__()
        self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ)
        self.conv2 = Conv2DBNActiv(nout, nout, ksize, stride, pad, activ=activ)

    def __call__(self, x):
        skip = self.conv1(x)
        h = self.conv2(skip)

        return h, skip


class Decoder(nn.Module):
    def __init__(
        self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False
    ):
        super(Decoder, self).__init__()
        self.conv = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ)
        self.dropout = nn.Dropout2d(0.1) if dropout else None

    def __call__(self, x, skip=None):
        x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True)
        if skip is not None:
            skip = spec_utils.crop_center(skip, x)
            x = torch.cat([x, skip], dim=1)
        h = self.conv(x)

        if self.dropout is not None:
            h = self.dropout(h)

        return h


class ASPPModule(nn.Module):
    def __init__(self, nin, nout, dilations=(4, 8, 16, 32, 64), activ=nn.ReLU):
        super(ASPPModule, self).__init__()
        self.conv1 = nn.Sequential(
            nn.AdaptiveAvgPool2d((1, None)),
            Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ),
        )
        self.conv2 = Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ)
        self.conv3 = SeperableConv2DBNActiv(
            nin, nin, 3, 1, dilations[0], dilations[0], activ=activ
        )
        self.conv4 = SeperableConv2DBNActiv(
            nin, nin, 3, 1, dilations[1], dilations[1], activ=activ
        )
        self.conv5 = SeperableConv2DBNActiv(
            nin, nin, 3, 1, dilations[2], dilations[2], activ=activ
        )
        self.conv6 = SeperableConv2DBNActiv(
            nin, nin, 3, 1, dilations[2], dilations[2], activ=activ
        )
        self.conv7 = SeperableConv2DBNActiv(
            nin, nin, 3, 1, dilations[2], dilations[2], activ=activ
        )
        self.bottleneck = nn.Sequential(
            Conv2DBNActiv(nin * 7, nout, 1, 1, 0, activ=activ), nn.Dropout2d(0.1)
        )

    def forward(self, x):
        _, _, h, w = x.size()
        feat1 = F.interpolate(
            self.conv1(x), size=(h, w), mode="bilinear", align_corners=True
        )
        feat2 = self.conv2(x)
        feat3 = self.conv3(x)
        feat4 = self.conv4(x)
        feat5 = self.conv5(x)
        feat6 = self.conv6(x)
        feat7 = self.conv7(x)
        out = torch.cat((feat1, feat2, feat3, feat4, feat5, feat6, feat7), dim=1)
        bottle = self.bottleneck(out)
        return bottle
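Note that this wider variant declares dilations=(4, 8, 16, 32, 64) but conv5 through conv7 all reuse dilations[2]; it is reproduced as written rather than "corrected", since changing the dilations would change the behavior of models trained with this code. A quick shape check (illustrative, not part of the diff) confirming that the seven branches feed the nin * 7 bottleneck:

import torch

aspp = ASPPModule(16, 32)                 # nin=16, nout=32 are arbitrary
out = aspp(torch.randn(2, 16, 64, 128))   # (N, nin, bins, frames)
print(out.shape)                          # torch.Size([2, 32, 64, 128])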
tools/uvr5/lib/lib_v5/layers_537227KB.py
ADDED
@@ -0,0 +1,126 @@
import torch
import torch.nn.functional as F
from torch import nn

from . import spec_utils


class Conv2DBNActiv(nn.Module):
    def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU):
        super(Conv2DBNActiv, self).__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(
                nin,
                nout,
                kernel_size=ksize,
                stride=stride,
                padding=pad,
                dilation=dilation,
                bias=False,
            ),
            nn.BatchNorm2d(nout),
            activ(),
        )

    def __call__(self, x):
        return self.conv(x)


class SeperableConv2DBNActiv(nn.Module):
    def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU):
        super(SeperableConv2DBNActiv, self).__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(
                nin,
                nin,
                kernel_size=ksize,
                stride=stride,
                padding=pad,
                dilation=dilation,
                groups=nin,
                bias=False,
            ),
            nn.Conv2d(nin, nout, kernel_size=1, bias=False),
            nn.BatchNorm2d(nout),
            activ(),
        )

    def __call__(self, x):
        return self.conv(x)


class Encoder(nn.Module):
    def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU):
        super(Encoder, self).__init__()
        self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ)
        self.conv2 = Conv2DBNActiv(nout, nout, ksize, stride, pad, activ=activ)

    def __call__(self, x):
        skip = self.conv1(x)
        h = self.conv2(skip)

        return h, skip


class Decoder(nn.Module):
    def __init__(
        self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False
    ):
        super(Decoder, self).__init__()
        self.conv = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ)
        self.dropout = nn.Dropout2d(0.1) if dropout else None

    def __call__(self, x, skip=None):
        x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True)
        if skip is not None:
            skip = spec_utils.crop_center(skip, x)
            x = torch.cat([x, skip], dim=1)
        h = self.conv(x)

        if self.dropout is not None:
            h = self.dropout(h)

        return h


class ASPPModule(nn.Module):
    def __init__(self, nin, nout, dilations=(4, 8, 16, 32, 64), activ=nn.ReLU):
        super(ASPPModule, self).__init__()
        self.conv1 = nn.Sequential(
            nn.AdaptiveAvgPool2d((1, None)),
            Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ),
        )
        self.conv2 = Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ)
        self.conv3 = SeperableConv2DBNActiv(
            nin, nin, 3, 1, dilations[0], dilations[0], activ=activ
        )
        self.conv4 = SeperableConv2DBNActiv(
            nin, nin, 3, 1, dilations[1], dilations[1], activ=activ
        )
        self.conv5 = SeperableConv2DBNActiv(
            nin, nin, 3, 1, dilations[2], dilations[2], activ=activ
        )
        self.conv6 = SeperableConv2DBNActiv(
            nin, nin, 3, 1, dilations[2], dilations[2], activ=activ
        )
        self.conv7 = SeperableConv2DBNActiv(
            nin, nin, 3, 1, dilations[2], dilations[2], activ=activ
        )
        self.bottleneck = nn.Sequential(
            Conv2DBNActiv(nin * 7, nout, 1, 1, 0, activ=activ), nn.Dropout2d(0.1)
        )

    def forward(self, x):
        _, _, h, w = x.size()
        feat1 = F.interpolate(
            self.conv1(x), size=(h, w), mode="bilinear", align_corners=True
        )
        feat2 = self.conv2(x)
        feat3 = self.conv3(x)
        feat4 = self.conv4(x)
        feat5 = self.conv5(x)
        feat6 = self.conv6(x)
        feat7 = self.conv7(x)
        out = torch.cat((feat1, feat2, feat3, feat4, feat5, feat6, feat7), dim=1)
        bottle = self.bottleneck(out)
        return bottle
tools/uvr5/lib/lib_v5/layers_537238KB.py
ADDED
@@ -0,0 +1,126 @@
import torch
import torch.nn.functional as F
from torch import nn

from . import spec_utils


class Conv2DBNActiv(nn.Module):
    def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU):
        super(Conv2DBNActiv, self).__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(
                nin,
                nout,
                kernel_size=ksize,
                stride=stride,
                padding=pad,
                dilation=dilation,
                bias=False,
            ),
            nn.BatchNorm2d(nout),
            activ(),
        )

    def __call__(self, x):
        return self.conv(x)


class SeperableConv2DBNActiv(nn.Module):
    def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU):
        super(SeperableConv2DBNActiv, self).__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(
                nin,
                nin,
                kernel_size=ksize,
                stride=stride,
                padding=pad,
                dilation=dilation,
                groups=nin,
                bias=False,
            ),
            nn.Conv2d(nin, nout, kernel_size=1, bias=False),
            nn.BatchNorm2d(nout),
            activ(),
        )

    def __call__(self, x):
        return self.conv(x)


class Encoder(nn.Module):
    def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU):
        super(Encoder, self).__init__()
        self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ)
        self.conv2 = Conv2DBNActiv(nout, nout, ksize, stride, pad, activ=activ)

    def __call__(self, x):
        skip = self.conv1(x)
        h = self.conv2(skip)

        return h, skip


class Decoder(nn.Module):
    def __init__(
        self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False
    ):
        super(Decoder, self).__init__()
        self.conv = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ)
        self.dropout = nn.Dropout2d(0.1) if dropout else None

    def __call__(self, x, skip=None):
        x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True)
        if skip is not None:
            skip = spec_utils.crop_center(skip, x)
            x = torch.cat([x, skip], dim=1)
        h = self.conv(x)

        if self.dropout is not None:
            h = self.dropout(h)

        return h


class ASPPModule(nn.Module):
    def __init__(self, nin, nout, dilations=(4, 8, 16, 32, 64), activ=nn.ReLU):
        super(ASPPModule, self).__init__()
        self.conv1 = nn.Sequential(
            nn.AdaptiveAvgPool2d((1, None)),
            Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ),
        )
        self.conv2 = Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ)
        self.conv3 = SeperableConv2DBNActiv(
            nin, nin, 3, 1, dilations[0], dilations[0], activ=activ
        )
        self.conv4 = SeperableConv2DBNActiv(
            nin, nin, 3, 1, dilations[1], dilations[1], activ=activ
        )
        self.conv5 = SeperableConv2DBNActiv(
            nin, nin, 3, 1, dilations[2], dilations[2], activ=activ
        )
        self.conv6 = SeperableConv2DBNActiv(
            nin, nin, 3, 1, dilations[2], dilations[2], activ=activ
        )
        self.conv7 = SeperableConv2DBNActiv(
            nin, nin, 3, 1, dilations[2], dilations[2], activ=activ
        )
        self.bottleneck = nn.Sequential(
            Conv2DBNActiv(nin * 7, nout, 1, 1, 0, activ=activ), nn.Dropout2d(0.1)
        )

    def forward(self, x):
        _, _, h, w = x.size()
        feat1 = F.interpolate(
            self.conv1(x), size=(h, w), mode="bilinear", align_corners=True
        )
        feat2 = self.conv2(x)
        feat3 = self.conv3(x)
        feat4 = self.conv4(x)
        feat5 = self.conv5(x)
        feat6 = self.conv6(x)
        feat7 = self.conv7(x)
        out = torch.cat((feat1, feat2, feat3, feat4, feat5, feat6, feat7), dim=1)
        bottle = self.bottleneck(out)
        return bottle
tools/uvr5/lib/lib_v5/layers_new.py
ADDED
@@ -0,0 +1,125 @@
import torch
import torch.nn.functional as F
from torch import nn

from . import spec_utils


class Conv2DBNActiv(nn.Module):
    def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU):
        super(Conv2DBNActiv, self).__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(
                nin,
                nout,
                kernel_size=ksize,
                stride=stride,
                padding=pad,
                dilation=dilation,
                bias=False,
            ),
            nn.BatchNorm2d(nout),
            activ(),
        )

    def __call__(self, x):
        return self.conv(x)


class Encoder(nn.Module):
    def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU):
        super(Encoder, self).__init__()
        self.conv1 = Conv2DBNActiv(nin, nout, ksize, stride, pad, activ=activ)
        self.conv2 = Conv2DBNActiv(nout, nout, ksize, 1, pad, activ=activ)

    def __call__(self, x):
        h = self.conv1(x)
        h = self.conv2(h)

        return h


class Decoder(nn.Module):
    def __init__(
        self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False
    ):
        super(Decoder, self).__init__()
        self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ)
        # self.conv2 = Conv2DBNActiv(nout, nout, ksize, 1, pad, activ=activ)
        self.dropout = nn.Dropout2d(0.1) if dropout else None

    def __call__(self, x, skip=None):
        x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True)

        if skip is not None:
            skip = spec_utils.crop_center(skip, x)
            x = torch.cat([x, skip], dim=1)

        h = self.conv1(x)
        # h = self.conv2(h)

        if self.dropout is not None:
            h = self.dropout(h)

        return h


class ASPPModule(nn.Module):
    def __init__(self, nin, nout, dilations=(4, 8, 12), activ=nn.ReLU, dropout=False):
        super(ASPPModule, self).__init__()
        self.conv1 = nn.Sequential(
            nn.AdaptiveAvgPool2d((1, None)),
            Conv2DBNActiv(nin, nout, 1, 1, 0, activ=activ),
        )
        self.conv2 = Conv2DBNActiv(nin, nout, 1, 1, 0, activ=activ)
        self.conv3 = Conv2DBNActiv(
            nin, nout, 3, 1, dilations[0], dilations[0], activ=activ
        )
        self.conv4 = Conv2DBNActiv(
            nin, nout, 3, 1, dilations[1], dilations[1], activ=activ
        )
        self.conv5 = Conv2DBNActiv(
            nin, nout, 3, 1, dilations[2], dilations[2], activ=activ
        )
        self.bottleneck = Conv2DBNActiv(nout * 5, nout, 1, 1, 0, activ=activ)
        self.dropout = nn.Dropout2d(0.1) if dropout else None

    def forward(self, x):
        _, _, h, w = x.size()
        feat1 = F.interpolate(
            self.conv1(x), size=(h, w), mode="bilinear", align_corners=True
        )
        feat2 = self.conv2(x)
        feat3 = self.conv3(x)
        feat4 = self.conv4(x)
        feat5 = self.conv5(x)
        out = torch.cat((feat1, feat2, feat3, feat4, feat5), dim=1)
        out = self.bottleneck(out)

        if self.dropout is not None:
            out = self.dropout(out)

        return out


class LSTMModule(nn.Module):
    def __init__(self, nin_conv, nin_lstm, nout_lstm):
        super(LSTMModule, self).__init__()
        self.conv = Conv2DBNActiv(nin_conv, 1, 1, 1, 0)
        self.lstm = nn.LSTM(
            input_size=nin_lstm, hidden_size=nout_lstm // 2, bidirectional=True
        )
        self.dense = nn.Sequential(
            nn.Linear(nout_lstm, nin_lstm), nn.BatchNorm1d(nin_lstm), nn.ReLU()
        )

    def forward(self, x):
        N, _, nbins, nframes = x.size()
        h = self.conv(x)[:, 0]  # N, nbins, nframes
        h = h.permute(2, 0, 1)  # nframes, N, nbins
        h, _ = self.lstm(h)
        h = self.dense(h.reshape(-1, h.size()[-1]))  # nframes * N, nbins
        h = h.reshape(nframes, N, 1, nbins)
        h = h.permute(1, 2, 3, 0)

        return h
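A minimal shape walk-through for LSTMModule (illustrative values, not from the diff): the 1x1 conv collapses the channel axis, frames become the LSTM's sequence axis, and the bidirectional output is mapped back to nin_lstm bins by the dense head.

import torch

m = LSTMModule(nin_conv=32, nin_lstm=128, nout_lstm=256)
x = torch.randn(4, 32, 128, 64)   # (N, channels, nbins, nframes); nbins must equal nin_lstm
h = m(x)
print(h.shape)                    # torch.Size([4, 1, 128, 64])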
tools/uvr5/lib/lib_v5/model_param_init.py
ADDED
@@ -0,0 +1,69 @@
import json
import os
import pathlib

default_param = {}
default_param["bins"] = 768
default_param["unstable_bins"] = 9  # training only
default_param["reduction_bins"] = 762  # training only
default_param["sr"] = 44100
default_param["pre_filter_start"] = 757
default_param["pre_filter_stop"] = 768
default_param["band"] = {}


default_param["band"][1] = {
    "sr": 11025,
    "hl": 128,
    "n_fft": 960,
    "crop_start": 0,
    "crop_stop": 245,
    "lpf_start": 61,  # inference only
    "res_type": "polyphase",
}

default_param["band"][2] = {
    "sr": 44100,
    "hl": 512,
    "n_fft": 1536,
    "crop_start": 24,
    "crop_stop": 547,
    "hpf_start": 81,  # inference only
    "res_type": "sinc_best",
}


def int_keys(d):
    r = {}
    for k, v in d:
        if k.isdigit():
            k = int(k)
        r[k] = v
    return r


class ModelParameters(object):
    def __init__(self, config_path=""):
        if ".pth" == pathlib.Path(config_path).suffix:
            import zipfile

            with zipfile.ZipFile(config_path, "r") as zip:
                self.param = json.loads(
                    zip.read("param.json"), object_pairs_hook=int_keys
                )
        elif ".json" == pathlib.Path(config_path).suffix:
            with open(config_path, "r") as f:
                self.param = json.loads(f.read(), object_pairs_hook=int_keys)
        else:
            self.param = default_param

        for k in [
            "mid_side",
            "mid_side_b",
            "mid_side_b2",
            "stereo_w",
            "stereo_n",
            "reverse",
        ]:
            if k not in self.param:
                self.param[k] = False
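A hedged usage sketch for ModelParameters, loading one of the JSON presets added below (the path matches this diff's layout):

params = ModelParameters("tools/uvr5/lib/lib_v5/modelparams/1band_sr44100_hl512.json")
print(params.param["sr"])        # 44100
print(params.param["band"][1])   # band keys are converted to ints by int_keys()
print(params.param["mid_side"])  # False -- filled in by the constructor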
tools/uvr5/lib/lib_v5/modelparams/1band_sr16000_hl512.json
ADDED
@@ -0,0 +1,19 @@
{
    "bins": 1024,
    "unstable_bins": 0,
    "reduction_bins": 0,
    "band": {
        "1": {
            "sr": 16000,
            "hl": 512,
            "n_fft": 2048,
            "crop_start": 0,
            "crop_stop": 1024,
            "hpf_start": -1,
            "res_type": "sinc_best"
        }
    },
    "sr": 16000,
    "pre_filter_start": 1023,
    "pre_filter_stop": 1024
}
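An observation on the preset schema (not stated in the diff, but it holds for all six single-band files here): bins is n_fft // 2, the band's spectrogram is kept over [crop_start, crop_stop), and pre_filter_start/pre_filter_stop mark the rolloff region at the top of the spectrum. A quick consistency check:

import json

with open("tools/uvr5/lib/lib_v5/modelparams/1band_sr16000_hl512.json") as f:
    cfg = json.load(f)

assert cfg["bins"] == cfg["band"]["1"]["n_fft"] // 2  # 1024 == 2048 // 2
assert cfg["pre_filter_stop"] <= cfg["bins"]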
tools/uvr5/lib/lib_v5/modelparams/1band_sr32000_hl512.json
ADDED
@@ -0,0 +1,19 @@
{
    "bins": 1024,
    "unstable_bins": 0,
    "reduction_bins": 0,
    "band": {
        "1": {
            "sr": 32000,
            "hl": 512,
            "n_fft": 2048,
            "crop_start": 0,
            "crop_stop": 1024,
            "hpf_start": -1,
            "res_type": "kaiser_fast"
        }
    },
    "sr": 32000,
    "pre_filter_start": 1000,
    "pre_filter_stop": 1021
}
tools/uvr5/lib/lib_v5/modelparams/1band_sr33075_hl384.json
ADDED
@@ -0,0 +1,19 @@
{
    "bins": 1024,
    "unstable_bins": 0,
    "reduction_bins": 0,
    "band": {
        "1": {
            "sr": 33075,
            "hl": 384,
            "n_fft": 2048,
            "crop_start": 0,
            "crop_stop": 1024,
            "hpf_start": -1,
            "res_type": "sinc_best"
        }
    },
    "sr": 33075,
    "pre_filter_start": 1000,
    "pre_filter_stop": 1021
}
tools/uvr5/lib/lib_v5/modelparams/1band_sr44100_hl1024.json
ADDED
@@ -0,0 +1,19 @@
{
    "bins": 1024,
    "unstable_bins": 0,
    "reduction_bins": 0,
    "band": {
        "1": {
            "sr": 44100,
            "hl": 1024,
            "n_fft": 2048,
            "crop_start": 0,
            "crop_stop": 1024,
            "hpf_start": -1,
            "res_type": "sinc_best"
        }
    },
    "sr": 44100,
    "pre_filter_start": 1023,
    "pre_filter_stop": 1024
}
tools/uvr5/lib/lib_v5/modelparams/1band_sr44100_hl256.json
ADDED
@@ -0,0 +1,19 @@
{
    "bins": 256,
    "unstable_bins": 0,
    "reduction_bins": 0,
    "band": {
        "1": {
            "sr": 44100,
            "hl": 256,
            "n_fft": 512,
            "crop_start": 0,
            "crop_stop": 256,
            "hpf_start": -1,
            "res_type": "sinc_best"
        }
    },
    "sr": 44100,
    "pre_filter_start": 256,
    "pre_filter_stop": 256
}
tools/uvr5/lib/lib_v5/modelparams/1band_sr44100_hl512.json
ADDED
@@ -0,0 +1,19 @@
{
    "bins": 1024,
    "unstable_bins": 0,
    "reduction_bins": 0,
    "band": {
        "1": {
            "sr": 44100,
            "hl": 512,
            "n_fft": 2048,
            "crop_start": 0,
            "crop_stop": 1024,
            "hpf_start": -1,
            "res_type": "sinc_best"
        }
    },
    "sr": 44100,
    "pre_filter_start": 1023,
    "pre_filter_stop": 1024
}