Upload demo_cli.py with huggingface_hub
demo_cli.py
ADDED
@@ -0,0 +1,203 @@
from encoder.params_model import model_embedding_size as speaker_embedding_size
from utils.argutils import print_args
from utils.modelutils import check_model_paths
from synthesizer.inference import Synthesizer
from encoder import inference as encoder
from vocoder import inference as vocoder
from pathlib import Path
import numpy as np
import soundfile as sf
import librosa
import argparse
import torch
import sys
import os
from audioread.exceptions import NoBackendError

if __name__ == '__main__':
    ## Info & args
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument("-e", "--enc_model_fpath", type=Path,
                        default="encpretrained.pt",
                        help="Path to a saved encoder")
    parser.add_argument("-s", "--syn_model_fpath", type=Path,
                        default="synpretrained.pt",
                        help="Path to a saved synthesizer")
    parser.add_argument("-v", "--voc_model_fpath", type=Path,
                        default="vocpretrained.pt",
                        help="Path to a saved vocoder")
    parser.add_argument("--cpu", action="store_true", help="If True, processing is done on CPU, even when a GPU is available.")
    parser.add_argument("--no_sound", action="store_true", help="If True, audio won't be played.")
    parser.add_argument("--seed", type=int, default=None, help="Optional random number seed value to make toolbox deterministic.")
    parser.add_argument("--no_mp3_support", action="store_true", help="If True, disallows loading mp3 files to prevent audioread errors when ffmpeg is not installed.")
    parser.add_argument("-audio", "--audio_path", type=Path, required=True,
                        help="Path to an audio file of the voice to clone")
    parser.add_argument("--text", type=str, required=True, help="Text to synthesize")
    args = parser.parse_args()
    print_args(args, parser)
    if not args.no_sound:
        import sounddevice as sd

    if args.cpu:
        # Hide GPUs from PyTorch to force CPU processing
        os.environ["CUDA_VISIBLE_DEVICES"] = "-1"

    if not args.no_mp3_support:
        # Probe for an mp3 decoding backend by attempting to load a sample file
        try:
            librosa.load("samples/1320_00000.mp3")
        except NoBackendError:
            print("Librosa will be unable to open mp3 files if additional software is not installed.\n"
                  "Please install ffmpeg or add the '--no_mp3_support' option to proceed without support for mp3 files.")
            sys.exit(-1)

    print("Running a test of your configuration...\n")

    if torch.cuda.is_available():
        device_id = torch.cuda.current_device()
        gpu_properties = torch.cuda.get_device_properties(device_id)
        ## Print some environment information (for debugging purposes)
        print("Found %d GPUs available. Using GPU %d (%s) of compute capability %d.%d with "
              "%.1f GB total memory.\n" %
              (torch.cuda.device_count(),
               device_id,
               gpu_properties.name,
               gpu_properties.major,
               gpu_properties.minor,
               gpu_properties.total_memory / 1e9))
    else:
        print("Using CPU for inference.\n")

    ## Remind the user to download pretrained models if needed
    check_model_paths(encoder_path=args.enc_model_fpath,
                      synthesizer_path=args.syn_model_fpath,
                      vocoder_path=args.voc_model_fpath)

    ## Load the models one by one.
    print("Preparing the encoder, the synthesizer and the vocoder...")
    encoder.load_model(args.enc_model_fpath)
    synthesizer = Synthesizer(args.syn_model_fpath)
    vocoder.load_model(args.voc_model_fpath)


    ## Run a test
    # print("Testing your configuration with small inputs.")
    # # Forward an audio waveform of zeroes that lasts 1 second. Notice how we can get the encoder's
    # # sampling rate, which may differ.
    # # If you're unfamiliar with digital audio, know that it is encoded as an array of floats
    # # (or sometimes integers, but mostly floats in this project) ranging from -1 to 1.
    # # The sampling rate is the number of values (samples) recorded per second; it is set to
    # # 16000 for the encoder. Creating an array of length <sampling_rate> will always correspond
    # # to an audio of 1 second.
    # print(" Testing the encoder...")
    # encoder.embed_utterance(np.zeros(encoder.sampling_rate))

    # # Create a dummy embedding. You would normally use the embedding that encoder.embed_utterance
    # # returns, but here we're going to make one ourselves just for the sake of showing that it's
    # # possible.
    # embed = np.random.rand(speaker_embedding_size)
    # # Embeddings are L2-normalized (this isn't important here, but if you want to make your own
    # # embeddings it will be).
    # embed /= np.linalg.norm(embed)
    # # The synthesizer can handle multiple inputs with batching. Let's create another embedding to
    # # illustrate that.
    # embeds = [embed, np.zeros(speaker_embedding_size)]
    # texts = ["test 1", "test 2"]
    # print(" Testing the synthesizer... (loading the model will output a lot of text)")
    # mels = synthesizer.synthesize_spectrograms(texts, embeds)

    # # The vocoder synthesizes one waveform at a time, but it's more efficient for long ones. We
    # # can concatenate the mel spectrograms into a single one.
    # mel = np.concatenate(mels, axis=1)
    # # The vocoder can take a callback function to display the generation. More on that later. For
    # # now we'll simply hide it like this:
    # no_action = lambda *args: None
    # print(" Testing the vocoder...")
    # # For the sake of making this test short, we'll pass a short target length. The target length
    # # is the length of the wav segments that are processed in parallel. E.g. for audio sampled
    # # at 16000 Hertz, a target length of 8000 means that the target audio will be cut into chunks of
    # # 0.5 seconds which will all be generated together. The parameters here are absurdly short, and
    # # that has a detrimental effect on the quality of the audio. The default parameters are
    # # recommended in general.
    # vocoder.infer_waveform(mel, target=200, overlap=50, progress_callback=no_action)

    print("All tests passed! You can now synthesize speech.\n\n")


    ## Interactive speech generation
    print("This is a GUI-less example interface to SV2TTS. The purpose of this script is to "
          "show how you can interface this project easily with your own. See the source code for "
          "an explanation of what is happening.\n")

    print("Starting the generation pipeline")
    # The original demo prompted for a reference voice inside a "while True:" loop
    # ("Reference voice: enter an audio filepath of a voice to be cloned (mp3, wav, m4a, flac, ...)").
    # This CLI version reads the reference audio path from the command-line arguments instead.
    in_fpath = args.audio_path

    if in_fpath.suffix.lower() == ".mp3" and args.no_mp3_support:
        print("Can't use mp3 files; please supply a wav/m4a/flac file or drop the --no_mp3_support flag.")
        sys.exit(-1)

    ## Computing the embedding
    # First, we load the wav using the function that the speaker encoder provides. This is
    # important: there is preprocessing that must be applied.

    # The following two methods are equivalent:
    # - Directly load from the filepath:
    preprocessed_wav = encoder.preprocess_wav(in_fpath)
    # - If the wav is already loaded:
    original_wav, sampling_rate = librosa.load(str(in_fpath))
    preprocessed_wav = encoder.preprocess_wav(original_wav, sampling_rate)
    print("Loaded file successfully")

    # Then we derive the embedding. There are many functions and parameters that the
    # speaker encoder interfaces. These are mostly for in-depth research. You will typically
    # only use this function (with its default parameters):
    embed = encoder.embed_utterance(preprocessed_wav)
    print("Created the embedding")
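    # Sanity check: per the commented-out test above, embed_utterance should
    # return a 1-D, L2-normalized vector of length speaker_embedding_size
    # (this check is illustrative and was not part of the original pipeline).
    assert embed.shape == (speaker_embedding_size,)
    assert np.isclose(np.linalg.norm(embed), 1.0, atol=1e-3)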


    ## Generating the spectrogram
    text = args.text

    # If seed is specified, reset torch seed and force synthesizer reload
    if args.seed is not None:
        torch.manual_seed(args.seed)
        synthesizer = Synthesizer(args.syn_model_fpath)

    # The synthesizer works in batch, so you need to put your data in a list or numpy array
    texts = [text]
    embeds = [embed]
    # If you know what the attention layer alignments are, you can retrieve them here by
    # passing return_alignments=True
    specs = synthesizer.synthesize_spectrograms(texts, embeds)
    spec = specs[0]
    print("Created the mel spectrogram")
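    # Note: spec is a 2-D numpy array laid out as (mel channels, frames), as
    # implied by the axis=1 concatenation in the commented-out test above; the
    # frame count grows with the input text and drives the vocoder's runtime.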


    ## Generating the waveform
    print("Synthesizing the waveform:")

    # If seed is specified, reset torch seed and reload vocoder
    if args.seed is not None:
        torch.manual_seed(args.seed)
        vocoder.load_model(args.voc_model_fpath)

    # Synthesizing the waveform is fairly straightforward. Remember that the longer the
    # spectrogram, the more time-efficient the vocoder.
    generated_wav = vocoder.infer_waveform(spec)


    ## Post-generation
    # There's a bug with sounddevice that makes the audio cut one second earlier, so we
    # pad it.
    generated_wav = np.pad(generated_wav, (0, synthesizer.sample_rate), mode="constant")

    # Trim excess silences to compensate for gaps in spectrograms (issue #53)
    generated_wav = encoder.preprocess_wav(generated_wav)

    # Save it to disk
    filename = "demo_output_1.wav"
    print(generated_wav.dtype)  # debug: show the waveform dtype before casting
    sf.write(filename, generated_wav.astype(np.float32), synthesizer.sample_rate)
    print("\nSaved output as %s\n\n" % filename)
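
For reference, a minimal invocation sketch. It assumes the three pretrained model files sit at the script's default paths (encpretrained.pt, synpretrained.pt, vocpretrained.pt in the working directory; pass -e/-s/-v otherwise) and uses a hypothetical reference recording, samples/my_voice.wav:

python demo_cli.py -audio samples/my_voice.wav --text "Hello world" --no_sound

The output is written to demo_output_1.wav in the working directory; add --cpu to force CPU inference, or --seed 42 to make generation deterministic.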