Spaces:
Running
on
Zero
Running
on
Zero
passerbya
committed on
Commit
•
88c8bf7
1
Parent(s):
2f49610
更换前端为WeTextProcessing
Browse files- cosyvoice/cli/frontend.py +7 -16
cosyvoice/cli/frontend.py
CHANGED
@@ -21,12 +21,9 @@ import torchaudio.compliance.kaldi as kaldi
|
|
21 |
import torchaudio
|
22 |
import os
|
23 |
import inflect
|
24 |
-
|
25 |
-
|
26 |
-
|
27 |
-
except:
|
28 |
-
print("failed to import ttsfrd, please normalize input text manually")
|
29 |
-
use_ttsfrd = False
|
30 |
from cosyvoice.utils.frontend_utils import contains_chinese, replace_blank, replace_corner_mark, remove_bracket, spell_out_number, split_paragraph
|
31 |
|
32 |
|
@@ -53,14 +50,8 @@ class CosyVoiceFrontEnd:
|
|
53 |
self.instruct = instruct
|
54 |
self.allowed_special = allowed_special
|
55 |
self.inflect_parser = inflect.engine()
|
56 |
-
self.
|
57 |
-
|
58 |
-
self.frd = ttsfrd.TtsFrontendEngine()
|
59 |
-
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
|
60 |
-
assert self.frd.initialize('{}/../../pretrained_models/CosyVoice-ttsfrd/resource'.format(ROOT_DIR)) is True, 'failed to initialize ttsfrd resource'
|
61 |
-
self.frd.set_lang_type('pinyin')
|
62 |
-
self.frd.enable_pinyin_mix(True)
|
63 |
-
self.frd.set_breakmodel_index(1)
|
64 |
|
65 |
def _extract_text_token(self, text):
|
66 |
text_token = self.tokenizer.encode(text, allowed_special=self.allowed_special)
|
@@ -95,8 +86,7 @@ class CosyVoiceFrontEnd:
|
|
95 |
def text_normalize(self, text, split=True):
|
96 |
text = text.strip()
|
97 |
if contains_chinese(text):
|
98 |
-
|
99 |
-
text = self.frd.get_frd_extra_info(text, 'input')
|
100 |
text = text.replace("\n", "")
|
101 |
text = replace_blank(text)
|
102 |
text = replace_corner_mark(text)
|
@@ -107,6 +97,7 @@ class CosyVoiceFrontEnd:
|
|
107 |
token_min_n=60, merge_len=20,
|
108 |
comma_split=False)]
|
109 |
else:
|
|
|
110 |
text = spell_out_number(text, self.inflect_parser)
|
111 |
texts = [i for i in split_paragraph(text, partial(self.tokenizer.encode, allowed_special=self.allowed_special), "en", token_max_n=80,
|
112 |
token_min_n=60, merge_len=20,
|
|
|
21 |
import torchaudio
|
22 |
import os
|
23 |
import inflect
|
24 |
+
from tn.chinese.normalizer import Normalizer as ZhNormalizer
|
25 |
+
from tn.english.normalizer import Normalizer as EnNormalizer
|
26 |
+
|
|
|
|
|
|
|
27 |
from cosyvoice.utils.frontend_utils import contains_chinese, replace_blank, replace_corner_mark, remove_bracket, spell_out_number, split_paragraph
|
28 |
|
29 |
|
|
|
50 |
self.instruct = instruct
|
51 |
self.allowed_special = allowed_special
|
52 |
self.inflect_parser = inflect.engine()
|
53 |
+
self.zh_tn_model = ZhNormalizer(remove_erhua=False,full_to_half=False)
|
54 |
+
self.en_tn_model = EnNormalizer()
|
|
|
|
|
|
|
|
|
|
|
|
|
55 |
|
56 |
def _extract_text_token(self, text):
|
57 |
text_token = self.tokenizer.encode(text, allowed_special=self.allowed_special)
|
|
|
86 |
def text_normalize(self, text, split=True):
|
87 |
text = text.strip()
|
88 |
if contains_chinese(text):
|
89 |
+
text = self.zh_tn_model.normalize(text)
|
|
|
90 |
text = text.replace("\n", "")
|
91 |
text = replace_blank(text)
|
92 |
text = replace_corner_mark(text)
|
|
|
97 |
token_min_n=60, merge_len=20,
|
98 |
comma_split=False)]
|
99 |
else:
|
100 |
+
text = self.en_tn_model.normalize(text)
|
101 |
text = spell_out_number(text, self.inflect_parser)
|
102 |
texts = [i for i in split_paragraph(text, partial(self.tokenizer.encode, allowed_special=self.allowed_special), "en", token_max_n=80,
|
103 |
token_min_n=60, merge_len=20,
|