Spaces:
Running
on
Zero
Running
on
Zero
update with upstream
Browse files
- README.md +5 -3
- cosyvoice/cli/frontend.py +10 -2
- cosyvoice/dataset/processor.py +0 -1
- cosyvoice/utils/frontend_utils.py +6 -1
- requirements.txt +2 -1
README.md
CHANGED
@@ -22,6 +22,8 @@ git submodule update --init --recursive
|
|
22 |
``` sh
|
23 |
conda create -n cosyvoice python=3.8
|
24 |
conda activate cosyvoice
|
|
|
|
|
25 |
pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/ --trusted-host=mirrors.aliyun.com
|
26 |
|
27 |
# If you encounter sox compatibility issues
|
@@ -50,9 +52,9 @@ git clone https://www.modelscope.cn/iic/CosyVoice-300M-Instruct.git pretrained_m
|
|
50 |
git clone https://www.modelscope.cn/iic/CosyVoice-ttsfrd.git pretrained_models/CosyVoice-ttsfrd
|
51 |
```
|
52 |
|
53 |
-
Optionally, you can unzip the `ttsfrd` resource and install the `ttsfrd` package.
|
54 |
|
55 |
-
Notice that this step is not necessary. If you do not install `ttsfrd` package,
|
56 |
|
57 |
``` sh
|
58 |
cd pretrained_models/CosyVoice-ttsfrd/
|
@@ -142,7 +144,7 @@ python3 client.py --port 50000 --mode <sft|zero_shot|cross_lingual|instruct>
|
|
142 |
|
143 |
You can directly discuss on [Github Issues](https://github.com/FunAudioLLM/CosyVoice/issues).
|
144 |
|
145 |
-
You can also scan the QR code to join our
|
146 |
|
147 |
<img src="./asset/dingding.png" width="250px">
|
148 |
|
|
|
22 |
``` sh
|
23 |
conda create -n cosyvoice python=3.8
|
24 |
conda activate cosyvoice
|
25 |
+
# pynini is required by WeTextProcessing, use conda to install it as it can be executed on all platforms.
|
26 |
+
conda install -y -c conda-forge pynini==2.1.5
|
27 |
pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/ --trusted-host=mirrors.aliyun.com
|
28 |
|
29 |
# If you encounter sox compatibility issues
|
|
|
52 |
git clone https://www.modelscope.cn/iic/CosyVoice-ttsfrd.git pretrained_models/CosyVoice-ttsfrd
|
53 |
```
|
54 |
|
55 |
+
Optionally, you can unzip the `ttsfrd` resource and install the `ttsfrd` package for better text normalization performance.
|
56 |
|
57 |
+
Notice that this step is not necessary. If you do not install `ttsfrd` package, we will use WeTextProcessing by default.
|
58 |
|
59 |
``` sh
|
60 |
cd pretrained_models/CosyVoice-ttsfrd/
|
|
|
144 |
|
145 |
You can directly discuss on [Github Issues](https://github.com/FunAudioLLM/CosyVoice/issues).
|
146 |
|
147 |
+
You can also scan the QR code to join our official Dingding chat group.
|
148 |
|
149 |
<img src="./asset/dingding.png" width="250px">
|
150 |
|
cosyvoice/cli/frontend.py
CHANGED
@@ -24,8 +24,10 @@ import inflect
|
|
24 |
try:
|
25 |
import ttsfrd
|
26 |
use_ttsfrd = True
|
27 |
-
except:
|
28 |
-
print("failed to import ttsfrd,
|
|
|
|
|
29 |
use_ttsfrd = False
|
30 |
from cosyvoice.utils.frontend_utils import contains_chinese, replace_blank, replace_corner_mark, remove_bracket, spell_out_number, split_paragraph
|
31 |
|
@@ -61,6 +63,9 @@ class CosyVoiceFrontEnd:
|
|
61 |
self.frd.set_lang_type('pinyin')
|
62 |
self.frd.enable_pinyin_mix(True)
|
63 |
self.frd.set_breakmodel_index(1)
|
|
|
|
|
|
|
64 |
|
65 |
def _extract_text_token(self, text):
|
66 |
text_token = self.tokenizer.encode(text, allowed_special=self.allowed_special)
|
@@ -97,6 +102,8 @@ class CosyVoiceFrontEnd:
|
|
97 |
if contains_chinese(text):
|
98 |
if self.use_ttsfrd:
|
99 |
text = self.frd.get_frd_extra_info(text, 'input')
|
|
|
|
|
100 |
text = text.replace("\n", "")
|
101 |
text = replace_blank(text)
|
102 |
text = replace_corner_mark(text)
|
@@ -107,6 +114,7 @@ class CosyVoiceFrontEnd:
|
|
107 |
token_min_n=60, merge_len=20,
|
108 |
comma_split=False)]
|
109 |
else:
|
|
|
110 |
text = spell_out_number(text, self.inflect_parser)
|
111 |
texts = [i for i in split_paragraph(text, partial(self.tokenizer.encode, allowed_special=self.allowed_special), "en", token_max_n=80,
|
112 |
token_min_n=60, merge_len=20,
|
|
|
24 |
try:
|
25 |
import ttsfrd
|
26 |
use_ttsfrd = True
|
27 |
+
except ImportError:
|
28 |
+
print("failed to import ttsfrd, use WeTextProcessing instead")
|
29 |
+
from tn.chinese.normalizer import Normalizer as ZhNormalizer
|
30 |
+
from tn.english.normalizer import Normalizer as EnNormalizer
|
31 |
use_ttsfrd = False
|
32 |
from cosyvoice.utils.frontend_utils import contains_chinese, replace_blank, replace_corner_mark, remove_bracket, spell_out_number, split_paragraph
|
33 |
|
|
|
63 |
self.frd.set_lang_type('pinyin')
|
64 |
self.frd.enable_pinyin_mix(True)
|
65 |
self.frd.set_breakmodel_index(1)
|
66 |
+
else:
|
67 |
+
self.zh_tn_model = ZhNormalizer(remove_erhua=False, full_to_half=False)
|
68 |
+
self.en_tn_model = EnNormalizer()
|
69 |
|
70 |
def _extract_text_token(self, text):
|
71 |
text_token = self.tokenizer.encode(text, allowed_special=self.allowed_special)
|
|
|
102 |
if contains_chinese(text):
|
103 |
if self.use_ttsfrd:
|
104 |
text = self.frd.get_frd_extra_info(text, 'input')
|
105 |
+
else:
|
106 |
+
text = self.zh_tn_model.normalize(text)
|
107 |
text = text.replace("\n", "")
|
108 |
text = replace_blank(text)
|
109 |
text = replace_corner_mark(text)
|
|
|
114 |
token_min_n=60, merge_len=20,
|
115 |
comma_split=False)]
|
116 |
else:
|
117 |
+
text = self.en_tn_model.normalize(text)
|
118 |
text = spell_out_number(text, self.inflect_parser)
|
119 |
texts = [i for i in split_paragraph(text, partial(self.tokenizer.encode, allowed_special=self.allowed_special), "en", token_max_n=80,
|
120 |
token_min_n=60, merge_len=20,
|
cosyvoice/dataset/processor.py
CHANGED
@@ -22,7 +22,6 @@ from torch.nn.utils.rnn import pad_sequence
|
|
22 |
import torch.nn.functional as F
|
23 |
|
24 |
torchaudio.set_audio_backend('soundfile')
|
25 |
-
torchaudio.utils.sox_utils.set_buffer_size(16500)
|
26 |
|
27 |
AUDIO_FORMAT_SETS = set(['flac', 'mp3', 'm4a', 'ogg', 'opus', 'wav', 'wma'])
|
28 |
|
|
|
22 |
import torch.nn.functional as F
|
23 |
|
24 |
torchaudio.set_audio_backend('soundfile')
|
|
|
25 |
|
26 |
AUDIO_FORMAT_SETS = set(['flac', 'mp3', 'm4a', 'ogg', 'opus', 'wav', 'wma'])
|
27 |
|
cosyvoice/utils/frontend_utils.py
CHANGED
@@ -74,7 +74,7 @@ def split_paragraph(text: str, tokenize, lang="zh", token_max_n=80, token_min_n=
|
|
74 |
return len(tokenize(_text)) < merge_len
|
75 |
|
76 |
if lang == "zh":
|
77 |
-
pounc = ['。', '?', '!', ';', ':', '.', '?', '!', ';']
|
78 |
else:
|
79 |
pounc = ['.', '?', '!', ';', ':']
|
80 |
if comma_split:
|
@@ -91,6 +91,11 @@ def split_paragraph(text: str, tokenize, lang="zh", token_max_n=80, token_min_n=
|
|
91 |
st = i + 2
|
92 |
else:
|
93 |
st = i + 1
|
|
|
|
|
|
|
|
|
|
|
94 |
final_utts = []
|
95 |
cur_utt = ""
|
96 |
for utt in utts:
|
|
|
74 |
return len(tokenize(_text)) < merge_len
|
75 |
|
76 |
if lang == "zh":
|
77 |
+
pounc = ['。', '?', '!', ';', ':', '、', '.', '?', '!', ';']
|
78 |
else:
|
79 |
pounc = ['.', '?', '!', ';', ':']
|
80 |
if comma_split:
|
|
|
91 |
st = i + 2
|
92 |
else:
|
93 |
st = i + 1
|
94 |
+
if len(utts) == 0:
|
95 |
+
if lang == "zh":
|
96 |
+
utts.append(text + '。')
|
97 |
+
else:
|
98 |
+
utts.append(text + '.')
|
99 |
final_utts = []
|
100 |
cur_utt = ""
|
101 |
for utt in utts:
|
requirements.txt
CHANGED
@@ -27,4 +27,5 @@ torch==2.0.1
|
|
27 |
torchaudio==2.0.2
|
28 |
wget==3.2
|
29 |
fastapi==0.111.0
|
30 |
-
fastapi-cli==0.0.4
|
|
|
|
27 |
torchaudio==2.0.2
|
28 |
wget==3.2
|
29 |
fastapi==0.111.0
|
30 |
+
fastapi-cli==0.0.4
|
31 |
+
WeTextProcessing==1.0.3
|