CosyVoice committed
Commit e19e80f
Parent: c901a12

update tempo change: replace the sox-based waveform tempo helper with mel-spectrogram interpolation, and expose a speed parameter through the CLI inference APIs and the webui

cosyvoice/cli/cosyvoice.py CHANGED
@@ -53,43 +53,43 @@ class CosyVoice:
         spks = list(self.frontend.spk2info.keys())
         return spks
 
-    def inference_sft(self, tts_text, spk_id, stream=False):
+    def inference_sft(self, tts_text, spk_id, stream=False, speed=1.0):
         for i in tqdm(self.frontend.text_normalize(tts_text, split=True)):
             model_input = self.frontend.frontend_sft(i, spk_id)
             start_time = time.time()
             logging.info('synthesis text {}'.format(i))
-            for model_output in self.model.inference(**model_input, stream=stream):
+            for model_output in self.model.inference(**model_input, stream=stream, speed=speed):
                 speech_len = model_output['tts_speech'].shape[1] / 22050
                 logging.info('yield speech len {}, rtf {}'.format(speech_len, (time.time() - start_time) / speech_len))
                 yield model_output
                 start_time = time.time()
 
-    def inference_zero_shot(self, tts_text, prompt_text, prompt_speech_16k, stream=False):
+    def inference_zero_shot(self, tts_text, prompt_text, prompt_speech_16k, stream=False, speed=1.0):
         prompt_text = self.frontend.text_normalize(prompt_text, split=False)
         for i in tqdm(self.frontend.text_normalize(tts_text, split=True)):
             model_input = self.frontend.frontend_zero_shot(i, prompt_text, prompt_speech_16k)
             start_time = time.time()
             logging.info('synthesis text {}'.format(i))
-            for model_output in self.model.inference(**model_input, stream=stream):
+            for model_output in self.model.inference(**model_input, stream=stream, speed=speed):
                 speech_len = model_output['tts_speech'].shape[1] / 22050
                 logging.info('yield speech len {}, rtf {}'.format(speech_len, (time.time() - start_time) / speech_len))
                 yield model_output
                 start_time = time.time()
 
-    def inference_cross_lingual(self, tts_text, prompt_speech_16k, stream=False):
+    def inference_cross_lingual(self, tts_text, prompt_speech_16k, stream=False, speed=1.0):
         if self.frontend.instruct is True:
             raise ValueError('{} do not support cross_lingual inference'.format(self.model_dir))
         for i in tqdm(self.frontend.text_normalize(tts_text, split=True)):
             model_input = self.frontend.frontend_cross_lingual(i, prompt_speech_16k)
             start_time = time.time()
             logging.info('synthesis text {}'.format(i))
-            for model_output in self.model.inference(**model_input, stream=stream):
+            for model_output in self.model.inference(**model_input, stream=stream, speed=speed):
                 speech_len = model_output['tts_speech'].shape[1] / 22050
                 logging.info('yield speech len {}, rtf {}'.format(speech_len, (time.time() - start_time) / speech_len))
                 yield model_output
                 start_time = time.time()
 
-    def inference_instruct(self, tts_text, spk_id, instruct_text, stream=False):
+    def inference_instruct(self, tts_text, spk_id, instruct_text, stream=False, speed=1.0):
         if self.frontend.instruct is False:
             raise ValueError('{} do not support instruct inference'.format(self.model_dir))
         instruct_text = self.frontend.text_normalize(instruct_text, split=False)
@@ -97,7 +97,7 @@ class CosyVoice:
             model_input = self.frontend.frontend_instruct(i, spk_id, instruct_text)
             start_time = time.time()
             logging.info('synthesis text {}'.format(i))
-            for model_output in self.model.inference(**model_input, stream=stream):
+            for model_output in self.model.inference(**model_input, stream=stream, speed=speed):
                 speech_len = model_output['tts_speech'].shape[1] / 22050
                 logging.info('yield speech len {}, rtf {}'.format(speech_len, (time.time() - start_time) / speech_len))
                 yield model_output
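
The new `speed` keyword defaults to 1.0 in all four entry points, so existing callers keep their behavior. A minimal usage sketch for the SFT path; the checkpoint directory and the '中文女' speaker id are assumptions, substitute your own model directory and a speaker from the frontend's speaker list:

```python
import torchaudio
from cosyvoice.cli.cosyvoice import CosyVoice

# Assumed checkpoint path and speaker id; substitute your own.
cosyvoice = CosyVoice('pretrained_models/CosyVoice-300M-SFT')

# speed > 1.0 shortens the output, speed < 1.0 stretches it; tempo change
# is honoured only in non-streaming mode (stream=False), see model.py below.
for k, out in enumerate(cosyvoice.inference_sft('你好,欢迎使用。', '中文女', stream=False, speed=1.5)):
    torchaudio.save('sft_{}.wav'.format(k), out['tts_speech'], 22050)
```

The 22050 sample rate matches the `speech_len` computation in the logging above.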
cosyvoice/cli/model.py CHANGED
@@ -15,6 +15,7 @@ import torch
 import numpy as np
 import threading
 import time
+from torch.nn import functional as F
 from contextlib import nullcontext
 import uuid
 from cosyvoice.utils.common import fade_in_out
@@ -91,7 +92,7 @@ class CosyVoiceModel:
             self.tts_speech_token_dict[uuid].append(i)
         self.llm_end_dict[uuid] = True
 
-    def token2wav(self, token, prompt_token, prompt_feat, embedding, uuid, finalize=False):
+    def token2wav(self, token, prompt_token, prompt_feat, embedding, uuid, finalize=False, speed=1.0):
         tts_mel = self.flow.inference(token=token.to(self.device),
                                       token_len=torch.tensor([token.shape[1]], dtype=torch.int32).to(self.device),
                                       prompt_token=prompt_token.to(self.device),
@@ -116,6 +117,9 @@ class CosyVoiceModel:
             self.hift_cache_dict[uuid] = {'source': tts_source[:, :, -self.source_cache_len:], 'mel': tts_mel[:, :, -self.mel_cache_len:]}
             tts_speech = tts_speech[:, :-self.source_cache_len]
         else:
+            if speed != 1.0:
+                assert self.hift_cache_dict[uuid] is None, 'speed change only support non-stream inference mode'
+                tts_mel = F.interpolate(tts_mel, size=int(tts_mel.shape[2] / speed), mode='linear')
             tts_speech, tts_source = self.hift.inference(mel=tts_mel, cache_source=hift_cache_source)
         return tts_speech
 
@@ -123,7 +127,7 @@
                   prompt_text=torch.zeros(1, 0, dtype=torch.int32),
                   llm_prompt_speech_token=torch.zeros(1, 0, dtype=torch.int32),
                   flow_prompt_speech_token=torch.zeros(1, 0, dtype=torch.int32),
-                  prompt_speech_feat=torch.zeros(1, 0, 80), stream=False, **kwargs):
+                  prompt_speech_feat=torch.zeros(1, 0, 80), stream=False, speed=1.0, **kwargs):
         # this_uuid is used to track variables related to this inference thread
         this_uuid = str(uuid.uuid1())
         with self.lock:
@@ -169,7 +173,8 @@ class CosyVoiceModel:
                                            prompt_feat=prompt_speech_feat,
                                            embedding=flow_embedding,
                                            uuid=this_uuid,
-                                           finalize=True)
+                                           finalize=True,
+                                           speed=speed)
             yield {'tts_speech': this_tts_speech.cpu()}
             with self.lock:
                 self.tts_speech_token_dict.pop(this_uuid)
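
The tempo change itself is one linear resize along the mel time axis: mapping `T` frames to `int(T / speed)` shortens or stretches the utterance before the HiFT vocoder runs, and pitch is preserved because the mel bins are untouched. A self-contained sketch with dummy data (the (batch, 80, frames) layout is inferred from the 80-dim `prompt_speech_feat` above):

```python
import torch
from torch.nn import functional as F

tts_mel = torch.randn(1, 80, 500)  # (batch, mel bins, frames), dummy data
speed = 1.5                        # >1.0 is faster, <1.0 is slower

# mode='linear' interpolates over the last axis of a 3-D (N, C, L) tensor,
# so only the frame count changes: 500 -> int(500 / 1.5) = 333.
stretched = F.interpolate(tts_mel, size=int(tts_mel.shape[2] / speed), mode='linear')
print(stretched.shape)             # torch.Size([1, 80, 333])
```

The assert restricts this to non-streaming synthesis, presumably because resizing each streamed chunk independently would no longer line up with the `hift_cache_dict` overlap caches.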
cosyvoice/utils/file_utils.py CHANGED
@@ -45,16 +45,3 @@ def load_wav(wav, target_sr):
     assert sample_rate > target_sr, 'wav sample rate {} must be greater than {}'.format(sample_rate, target_sr)
     speech = torchaudio.transforms.Resample(orig_freq=sample_rate, new_freq=target_sr)(speech)
     return speech
-
-
-def speed_change(waveform, sample_rate, speed_factor: str):
-    effects = [
-        ["tempo", speed_factor],  # speed_factor
-        ["rate", f"{sample_rate}"]
-    ]
-    augmented_waveform, new_sample_rate = torchaudio.sox_effects.apply_effects_tensor(
-        waveform,
-        sample_rate,
-        effects
-    )
-    return augmented_waveform, new_sample_rate
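
The deleted `speed_change` helper did the same job in the waveform domain via sox, and this commit supersedes it with the mel-domain interpolation above. For reference, a standalone wrapper around the same pitch-preserving sox `tempo` effect (a sketch, not part of this commit) would be:

```python
import torch
import torchaudio

def sox_tempo(waveform: torch.Tensor, sample_rate: int, speed_factor: float):
    # "tempo" changes duration without shifting pitch; "rate" pins the
    # output back to the original sample rate.
    effects = [['tempo', str(speed_factor)], ['rate', str(sample_rate)]]
    return torchaudio.sox_effects.apply_effects_tensor(waveform, sample_rate, effects)
```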
webui.py CHANGED
@@ -66,7 +66,7 @@ def change_instruction(mode_checkbox_group):
 
 
 def generate_audio(tts_text, mode_checkbox_group, sft_dropdown, prompt_text, prompt_wav_upload, prompt_wav_record, instruct_text,
-                   seed, stream, speed_factor):
+                   seed, stream, speed):
     if prompt_wav_upload is not None:
         prompt_wav = prompt_wav_upload
     elif prompt_wav_record is not None:
@@ -117,24 +117,24 @@ def generate_audio(tts_text, mode_checkbox_group, sft_dropdown, prompt_text, prompt_wav_upload, prompt_wav_record, instruct_text,
     if mode_checkbox_group == '预训练音色':
         logging.info('get sft inference request')
         set_all_random_seed(seed)
-        for i in cosyvoice.inference_sft(tts_text, sft_dropdown, stream=stream):
+        for i in cosyvoice.inference_sft(tts_text, sft_dropdown, stream=stream, speed=speed):
             yield (target_sr, i['tts_speech'].numpy().flatten())
     elif mode_checkbox_group == '3s极速复刻':
         logging.info('get zero_shot inference request')
         prompt_speech_16k = postprocess(load_wav(prompt_wav, prompt_sr))
         set_all_random_seed(seed)
-        for i in cosyvoice.inference_zero_shot(tts_text, prompt_text, prompt_speech_16k, stream=stream):
+        for i in cosyvoice.inference_zero_shot(tts_text, prompt_text, prompt_speech_16k, stream=stream, speed=speed):
             yield (target_sr, i['tts_speech'].numpy().flatten())
     elif mode_checkbox_group == '跨语种复刻':
         logging.info('get cross_lingual inference request')
         prompt_speech_16k = postprocess(load_wav(prompt_wav, prompt_sr))
         set_all_random_seed(seed)
-        for i in cosyvoice.inference_cross_lingual(tts_text, prompt_speech_16k, stream=stream):
+        for i in cosyvoice.inference_cross_lingual(tts_text, prompt_speech_16k, stream=stream, speed=speed):
             yield (target_sr, i['tts_speech'].numpy().flatten())
     else:
         logging.info('get instruct inference request')
         set_all_random_seed(seed)
-        for i in cosyvoice.inference_instruct(tts_text, sft_dropdown, instruct_text, stream=stream):
+        for i in cosyvoice.inference_instruct(tts_text, sft_dropdown, instruct_text, stream=stream, speed=speed):
             yield (target_sr, i['tts_speech'].numpy().flatten())
 
 
@@ -147,12 +147,12 @@ def main():
     gr.Markdown("#### 请输入需要合成的文本,选择推理模式,并按照提示步骤进行操作")
 
     tts_text = gr.Textbox(label="输入合成文本", lines=1, value="我是通义实验室语音团队全新推出的生成式语音大模型,提供舒适自然的语音合成能力。")
-    speed_factor = gr.Slider(minimum=0.25, maximum=4, step=0.05, label="语速调节", value=1.0, interactive=True)
     with gr.Row():
         mode_checkbox_group = gr.Radio(choices=inference_mode_list, label='选择推理模式', value=inference_mode_list[0])
         instruction_text = gr.Text(label="操作步骤", value=instruct_dict[inference_mode_list[0]], scale=0.5)
         sft_dropdown = gr.Dropdown(choices=sft_spk, label='选择预训练音色', value=sft_spk[0], scale=0.25)
         stream = gr.Radio(choices=stream_mode_list, label='是否流式推理', value=stream_mode_list[0][1])
+        speed = gr.Number(value=1, label="速度调节(仅支持非流式推理)", minimum=0.5, maximum=2.0, step=0.1)
         with gr.Column(scale=0.25):
             seed_button = gr.Button(value="\U0001F3B2")
             seed = gr.Number(value=0, label="随机推理种子")
@@ -170,7 +170,7 @@
     seed_button.click(generate_seed, inputs=[], outputs=seed)
     generate_button.click(generate_audio,
                           inputs=[tts_text, mode_checkbox_group, sft_dropdown, prompt_text, prompt_wav_upload, prompt_wav_record, instruct_text,
-                                  seed, stream, speed_factor],
+                                  seed, stream, speed],
                           outputs=[audio_output])
     mode_checkbox_group.change(fn=change_instruction, inputs=[mode_checkbox_group], outputs=[instruction_text])
     demo.queue(max_size=4, default_concurrency_limit=2)
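
In the UI, the old free-range slider (0.25 to 4.0, usable in any mode) becomes a `gr.Number` clamped to 0.5 to 2.0 whose label reads "speed control (non-streaming inference only)", matching the assert in `token2wav`. The webui does not itself guard the streaming case, so a request with stream=True and speed != 1.0 would trip that assert; one caller-side option is a guard like this hypothetical helper (not part of this commit):

```python
import logging

def safe_speed(stream: bool, speed: float) -> float:
    # token2wav asserts when speed != 1.0 in streaming mode;
    # fall back to 1.0 instead of failing the whole request.
    if stream and speed != 1.0:
        logging.warning('speed is ignored in streaming mode, using 1.0')
        return 1.0
    return speed
```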