CosyVoice commited on
Commit
5e97398
2 Parent(s): 9aea393 69026d8

Merge pull request #62 from passerbya/main

Browse files

更换默认ttsfrd为WeTextProcessing,修复半角句号结尾或者文本中没有标点会导致合成失败:RuntimeError: torch.cat(): expected a non-empty list of Tensors

cosyvoice/cli/frontend.py CHANGED
@@ -21,6 +21,8 @@ import torchaudio.compliance.kaldi as kaldi
21
  import torchaudio
22
  import os
23
  import inflect
 
 
24
  try:
25
  import ttsfrd
26
  use_ttsfrd = True
@@ -61,6 +63,9 @@ class CosyVoiceFrontEnd:
61
  self.frd.set_lang_type('pinyin')
62
  self.frd.enable_pinyin_mix(True)
63
  self.frd.set_breakmodel_index(1)
 
 
 
64
 
65
  def _extract_text_token(self, text):
66
  text_token = self.tokenizer.encode(text, allowed_special=self.allowed_special)
@@ -97,6 +102,8 @@ class CosyVoiceFrontEnd:
97
  if contains_chinese(text):
98
  if self.use_ttsfrd:
99
  text = self.frd.get_frd_extra_info(text, 'input')
 
 
100
  text = text.replace("\n", "")
101
  text = replace_blank(text)
102
  text = replace_corner_mark(text)
@@ -107,6 +114,7 @@ class CosyVoiceFrontEnd:
107
  token_min_n=60, merge_len=20,
108
  comma_split=False)]
109
  else:
 
110
  text = spell_out_number(text, self.inflect_parser)
111
  texts = [i for i in split_paragraph(text, partial(self.tokenizer.encode, allowed_special=self.allowed_special), "en", token_max_n=80,
112
  token_min_n=60, merge_len=20,
 
21
  import torchaudio
22
  import os
23
  import inflect
24
+ from tn.chinese.normalizer import Normalizer as ZhNormalizer
25
+ from tn.english.normalizer import Normalizer as EnNormalizer
26
  try:
27
  import ttsfrd
28
  use_ttsfrd = True
 
63
  self.frd.set_lang_type('pinyin')
64
  self.frd.enable_pinyin_mix(True)
65
  self.frd.set_breakmodel_index(1)
66
+ else:
67
+ self.zh_tn_model = ZhNormalizer(remove_erhua=False,full_to_half=False)
68
+ self.en_tn_model = EnNormalizer()
69
 
70
  def _extract_text_token(self, text):
71
  text_token = self.tokenizer.encode(text, allowed_special=self.allowed_special)
 
102
  if contains_chinese(text):
103
  if self.use_ttsfrd:
104
  text = self.frd.get_frd_extra_info(text, 'input')
105
+ else:
106
+ text = self.zh_tn_model.normalize(text)
107
  text = text.replace("\n", "")
108
  text = replace_blank(text)
109
  text = replace_corner_mark(text)
 
114
  token_min_n=60, merge_len=20,
115
  comma_split=False)]
116
  else:
117
+ text = self.en_tn_model.normalize(text)
118
  text = spell_out_number(text, self.inflect_parser)
119
  texts = [i for i in split_paragraph(text, partial(self.tokenizer.encode, allowed_special=self.allowed_special), "en", token_max_n=80,
120
  token_min_n=60, merge_len=20,
cosyvoice/utils/frontend_utils.py CHANGED
@@ -74,7 +74,7 @@ def split_paragraph(text: str, tokenize, lang="zh", token_max_n=80, token_min_n=
74
  return len(tokenize(_text)) < merge_len
75
 
76
  if lang == "zh":
77
- pounc = ['。', '?', '!', ';', ':', '.', '?', '!', ';']
78
  else:
79
  pounc = ['.', '?', '!', ';', ':']
80
  if comma_split:
@@ -91,6 +91,11 @@ def split_paragraph(text: str, tokenize, lang="zh", token_max_n=80, token_min_n=
91
  st = i + 2
92
  else:
93
  st = i + 1
 
 
 
 
 
94
  final_utts = []
95
  cur_utt = ""
96
  for utt in utts:
 
74
  return len(tokenize(_text)) < merge_len
75
 
76
  if lang == "zh":
77
+ pounc = ['。', '?', '!', ';', ':', '、', '.', '?', '!', ';']
78
  else:
79
  pounc = ['.', '?', '!', ';', ':']
80
  if comma_split:
 
91
  st = i + 2
92
  else:
93
  st = i + 1
94
+ if len(utts) == 0:
95
+ if lang == "zh":
96
+ utts.append(text + '。')
97
+ else:
98
+ utts.append(text + '.')
99
  final_utts = []
100
  cur_utt = ""
101
  for utt in utts:
requirements.txt CHANGED
@@ -25,4 +25,5 @@ soundfile==0.12.1
25
  tensorboard==2.14.0
26
  torch==2.0.1
27
  torchaudio==2.0.2
28
- wget==3.2
 
 
25
  tensorboard==2.14.0
26
  torch==2.0.1
27
  torchaudio==2.0.2
28
+ wget==3.2
29
+ WeTextProcessing