diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000000000000000000000000000000000000..207cbccd8f3a9c851465d6ddb3cbe501b3012b40 --- /dev/null +++ b/.gitattributes @@ -0,0 +1,48 @@ +*.7z filter=lfs diff=lfs merge=lfs -text +*.arrow filter=lfs diff=lfs merge=lfs -text +*.bin filter=lfs diff=lfs merge=lfs -text +*.bz2 filter=lfs diff=lfs merge=lfs -text +*.ckpt filter=lfs diff=lfs merge=lfs -text +*.ftz filter=lfs diff=lfs merge=lfs -text +*.gz filter=lfs diff=lfs merge=lfs -text +*.h5 filter=lfs diff=lfs merge=lfs -text +*.joblib filter=lfs diff=lfs merge=lfs -text +*.lfs.* filter=lfs diff=lfs merge=lfs -text +*.mlmodel filter=lfs diff=lfs merge=lfs -text +*.model filter=lfs diff=lfs merge=lfs -text +*.msgpack filter=lfs diff=lfs merge=lfs -text +*.npy filter=lfs diff=lfs merge=lfs -text +*.npz filter=lfs diff=lfs merge=lfs -text +*.onnx filter=lfs diff=lfs merge=lfs -text +*.ot filter=lfs diff=lfs merge=lfs -text +*.parquet filter=lfs diff=lfs merge=lfs -text +*.pb filter=lfs diff=lfs merge=lfs -text +*.pickle filter=lfs diff=lfs merge=lfs -text +*.pkl filter=lfs diff=lfs merge=lfs -text +*.pt filter=lfs diff=lfs merge=lfs -text +*.pth filter=lfs diff=lfs merge=lfs -text +*.rar filter=lfs diff=lfs merge=lfs -text +*.safetensors filter=lfs diff=lfs merge=lfs -text +saved_model/**/* filter=lfs diff=lfs merge=lfs -text +*.tar.* filter=lfs diff=lfs merge=lfs -text +*.tar filter=lfs diff=lfs merge=lfs -text +*.tflite filter=lfs diff=lfs merge=lfs -text +*.tgz filter=lfs diff=lfs merge=lfs -text +*.wasm filter=lfs diff=lfs merge=lfs -text +*.xz filter=lfs diff=lfs merge=lfs -text +*.zip filter=lfs diff=lfs merge=lfs -text +*.zst filter=lfs diff=lfs merge=lfs -text +*tfevents* filter=lfs diff=lfs merge=lfs -text +models/HolasoyGerman/HolasoyGerman/added_IVF3117_Flat_nprobe_1_HolasoyGerman_v2.index filter=lfs diff=lfs merge=lfs -text +models/Homer/Homer/added_IVF360_Flat_nprobe_1_HomerEsp_v1.index filter=lfs diff=lfs merge=lfs -text +models/Ibai/Ibai/added_IVF4601_Flat_nprobe_1_Ibai_v2.index filter=lfs diff=lfs merge=lfs -text +models/IlloJuan/IlloJuan/added_IVF593_Flat_nprobe_1_IlloJuan_v2.index filter=lfs diff=lfs merge=lfs -text +models/Joseju/Joseju/added_IVF256_Flat_nprobe_1_Joseju_v2.index filter=lfs diff=lfs merge=lfs -text +models/Quevedo/Quevedo/added_IVF2301_Flat_nprobe_10.index filter=lfs diff=lfs merge=lfs -text +models/Shadoune666/Shadoune666/added_IVF716_Flat_nprobe_1_Shadoune666_v2.index filter=lfs diff=lfs merge=lfs -text +models/Spreen/Spreen/added_IVF4737_Flat_nprobe_1_Spreen_v2.index filter=lfs diff=lfs merge=lfs -text +models/Totakeke/Totakeke/added_IVF256_Flat_nprobe_1_full_totakeke_v2.index filter=lfs diff=lfs merge=lfs -text +models/Vegetta777/Vegetta777/added_IVF2021_Flat_nprobe_1_Vegetta777_v2.index filter=lfs diff=lfs merge=lfs -text +models/Villager/Villager/added_IVF81_Flat_nprobe_1_v2.index filter=lfs diff=lfs merge=lfs -text +models/WalterWhite/WalterWhite/added_IVF458_Flat_nprobe_1_ww2_v2.index filter=lfs diff=lfs merge=lfs -text +models/FernandoAlonso/FernandoAlonso/added_IVF582_Flat_nprobe_1_fernando2_v2.index filter=lfs diff=lfs merge=lfs -text diff --git a/README.md b/README.md new file mode 100644 index 0000000000000000000000000000000000000000..11f4ae415ea820a8c82c69da45fe909c104c5611 --- /dev/null +++ b/README.md @@ -0,0 +1,14 @@ +--- +title: RVC TTS Demo +emoji: 🚀 +colorFrom: red +colorTo: pink +sdk: gradio +sdk_version: 3.36.1 +app_file: app.py +pinned: false +license: gpl-3.0 +duplicated_from: ImPavloh/RVC-TTS-Demo +--- + +Check out 
the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/app.py b/app.py new file mode 100644 index 0000000000000000000000000000000000000000..4c8bce2c69cfd2e7b7f55a3053738fe604c6a86a --- /dev/null +++ b/app.py @@ -0,0 +1,200 @@ +import os +import json +import torch +import asyncio +import librosa +import hashlib +import edge_tts +import gradio as gr +from config import Config +from vc_infer_pipeline import VC +from fairseq import checkpoint_utils +from lib.infer_pack.models import (SynthesizerTrnMs256NSFsid, SynthesizerTrnMs256NSFsid_nono, SynthesizerTrnMs768NSFsid, SynthesizerTrnMs768NSFsid_nono,) + +config = Config() + +def load_json_file(filepath): + with open(filepath, "r", encoding="utf-8") as f: content = json.load(f) + return content + +def file_checksum(file_path): + with open(file_path, 'rb') as f: + file_data = f.read() + return hashlib.md5(file_data).hexdigest() + +def get_existing_model_info(category_directory): + model_info_path = os.path.join(category_directory, 'model_info.json') + if os.path.exists(model_info_path): + with open(model_info_path, 'r') as f: return json.load(f) + return None + +def generate_model_info_files(): + folder_info = {} + model_directory = "models/" + for category_name in os.listdir(model_directory): + category_directory = os.path.join(model_directory, category_name) + if not os.path.isdir(category_directory): continue + + folder_info[category_name] = {"title": category_name, "folder_path": category_name} + existing_model_info = get_existing_model_info(category_directory) + model_info = {} + regenerate_model_info = False + + for model_name in os.listdir(category_directory): + model_path = os.path.join(category_directory, model_name) + if not os.path.isdir(model_path): continue + + model_data, regenerate = gather_model_info(category_directory, model_name, model_path, existing_model_info) + if model_data is not None: + model_info[model_name] = model_data + regenerate_model_info |= regenerate + + if regenerate_model_info: + with open(os.path.join(category_directory, 'model_info.json'), 'w') as f: json.dump(model_info, f, indent=4) + + folder_info_path = os.path.join(model_directory, 'folder_info.json') + with open(folder_info_path, 'w') as f: json.dump(folder_info, f, indent=4) + +def should_regenerate_model_info(existing_model_info, model_name, pth_checksum, index_checksum): + if existing_model_info is None or model_name not in existing_model_info: return True + return (existing_model_info[model_name]['model_path_checksum'] != pth_checksum or existing_model_info[model_name]['index_path_checksum'] != index_checksum) + +def get_model_files(model_path): return [f for f in os.listdir(model_path) if f.endswith('.pth') or f.endswith('.index')] + +def gather_model_info(category_directory, model_name, model_path, existing_model_info): + model_files = get_model_files(model_path) + if len(model_files) != 2: return None, False + + pth_file = [f for f in model_files if f.endswith('.pth')][0] + index_file = [f for f in model_files if f.endswith('.index')][0] + pth_checksum = file_checksum(os.path.join(model_path, pth_file)) + index_checksum = file_checksum(os.path.join(model_path, index_file)) + regenerate = should_regenerate_model_info(existing_model_info, model_name, pth_checksum, index_checksum) + + return {"title": model_name, "model_path": pth_file, "feature_retrieval_library": index_file, "model_path_checksum": pth_checksum, "index_path_checksum": index_checksum}, regenerate + +def 
create_vc_fn(model_name, tgt_sr, net_g, vc, if_f0, version, file_index): + def vc_fn(tts_text, tts_voice): + try: + if len(tts_text) > 100: return None + if tts_text is None or tts_voice is None: return None + asyncio.run(edge_tts.Communicate(tts_text, "-".join(tts_voice.split('-')[:-1])).save("tts.mp3")) + audio, sr = librosa.load("tts.mp3", sr=16000, mono=True) + vc_input = "tts.mp3" + times = [0, 0, 0] + audio_opt = vc.pipeline(hubert_model, net_g, 0, audio, vc_input, times, 0, "pm", file_index, 0.7, if_f0, 3, tgt_sr, 0, 1, version, 0.5, f0_file=None) + return (tgt_sr, audio_opt) + except Exception: return None + return vc_fn + +def load_model_parameters(category_folder, character_name, info): + model_index = f"models/{category_folder}/{character_name}/{info['feature_retrieval_library']}" + cpt = torch.load(f"models/{category_folder}/{character_name}/{info['model_path']}", map_location="cpu") + return model_index, cpt + +def select_net_g(cpt, version, if_f0): + if version == "v1": + if if_f0 == 1: net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=config.is_half) + else: net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"]) + elif version == "v2": + if if_f0 == 1: net_g = SynthesizerTrnMs768NSFsid(*cpt["config"], is_half=config.is_half) + else: net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"]) + return net_g + +def load_model_and_prepare(cpt, net_g): + del net_g.enc_q + net_g.load_state_dict(cpt["weight"], strict=False) + net_g.eval().to(config.device) + net_g = net_g.half() if config.is_half else net_g.float() + return net_g + +def create_and_append_model(models, model_functions, character_name, model_title, version, vc_fn): + models.append((character_name, model_title, version, vc_fn)) + model_functions[character_name] = vc_fn + return models, model_functions + +def load_model(): + categories = [] + model_functions = {} + folder_info = load_json_file("models/folder_info.json") + for category_name, category_info in folder_info.items(): + models = [] + models_info = load_json_file(f"models/{category_info['folder_path']}/model_info.json") + for character_name, info in models_info.items(): + model_index, cpt = load_model_parameters(category_info['folder_path'], character_name, info) + net_g = select_net_g(cpt, cpt.get("version", "v1"), cpt.get("f0", 1)) + cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0] + net_g = load_model_and_prepare(cpt, net_g) + vc = VC(cpt["config"][-1], config) + vc_fn = create_vc_fn(info['model_path'], cpt["config"][-1], net_g, vc, cpt.get("f0", 1), cpt.get("version", "v1"), model_index) + models, model_functions = create_and_append_model(models, model_functions, character_name, info['title'], cpt.get("version", "v1"), vc_fn) + categories.append([category_info['title'], category_info['folder_path'], models]) + return categories, model_functions + +generate_model_info_files() + +css = """ +.gradio-container { font-family: 'IBM Plex Sans', sans-serif; } +footer { visibility: hidden; display: none; } +.center-container { display: flex; flex-direction: column; align-items: center; justify-content: center;} +""" + +if __name__ == '__main__': + global hubert_model + models, _, _ = checkpoint_utils.load_model_ensemble_and_task(["hubert_base.pt"], suffix="") + hubert_model = models[0] + hubert_model = hubert_model.to(config.device) + hubert_model = hubert_model.half() if config.is_half else hubert_model.float() + hubert_model.eval() + categories, model_functions = load_model() + tts_voice_list = 
asyncio.get_event_loop().run_until_complete(edge_tts.list_voices()) + voices = [f"{v['ShortName']}-{v['Gender']}" for v in tts_voice_list] + with gr.Blocks(css=css, title="Demo RVC TTS - Pavloh", theme=gr.themes.Soft(primary_hue="cyan", secondary_hue="blue", radius_size="lg", text_size="lg") + .set(loader_color="#0B0F19", shadow_drop='*shadow_drop_lg', block_border_width="3px")) as pavloh: + gr.HTML(""" +
+        <div class="center-container">
+            <!-- reconstructed header markup; the original badge images and link hrefs are not recoverable from this dump -->
+            <a>License</a>
+            <a>GitHub</a>
+            <a>Twitter</a>
+            <h1>🗣️ RVC TTS Demo - Pavloh</h1>
+            <h3>An AI-Powered Text-to-Speech</h3>
+            <p>Try out the RVC Text-to-Speech Discord Bot</p>
+        </div>
+ """) + + with gr.Row(): + with gr.Column(): + m1 = gr.Dropdown(label="📦 Voice Model", choices=list(model_functions.keys()), allow_custom_value=False, value="Ibai") + t2 = gr.Dropdown(label="⚙️ Voice style and language [Edge-TTS]", choices=voices, allow_custom_value=False, value="es-ES-AlvaroNeural-Male") + t1 = gr.Textbox(label="📝 Text to convert") + c1 = gr.Button("Convert", variant="primary") + a1 = gr.Audio(label="🔉 Converted Text", interactive=False) + + def call_selected_model_fn(selected_model, t1, t2): + vc_fn = model_functions[selected_model] + return vc_fn(t1, t2) + + c1.click(fn=call_selected_model_fn, inputs=[m1, t1, t2], outputs=[a1]) + + gr.HTML(""" +
+        <div class="center-container">
+            <p>By using this website, you agree to the license.</p>
+        </div>
+ """) + +pavloh.queue(concurrency_count=1).launch() \ No newline at end of file diff --git a/config.py b/config.py new file mode 100644 index 0000000000000000000000000000000000000000..ddd0c75c4b71f44d12ce02e99646bd8c34eb8a84 --- /dev/null +++ b/config.py @@ -0,0 +1,31 @@ +import argparse +from multiprocessing import cpu_count + +class Config: + def __init__(self): + self.device = "cpu" + self.is_half = False + self.n_cpu = cpu_count() + (self.python_cmd, self.colab, self.noparallel, self.noautoopen, self.api) = self.arg_parse() + self.listen_port = 7860 + self.x_pad, self.x_query, self.x_center, self.x_max = self.device_config() + + @staticmethod + def arg_parse() -> tuple: + parser = argparse.ArgumentParser() + parser.add_argument("--pycmd", type=str, default="python") + parser.add_argument("--colab", action="store_true") + parser.add_argument("--noparallel", action="store_true") + parser.add_argument("--noautoopen", action="store_true") + parser.add_argument("--api", action="store_true") + cmd_opts = parser.parse_args() + + return (cmd_opts.pycmd, cmd_opts.colab, cmd_opts.noparallel, cmd_opts.noautoopen, cmd_opts.api) + + def device_config(self) -> tuple: + x_pad = 1 + x_query = 6 + x_center = 38 + x_max = 41 + + return x_pad, x_query, x_center, x_max \ No newline at end of file diff --git a/hubert_base.pt b/hubert_base.pt new file mode 100644 index 0000000000000000000000000000000000000000..72f47ab58564f01d5cc8b05c63bdf96d944551ff --- /dev/null +++ b/hubert_base.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f54b40fd2802423a5643779c4861af1e9ee9c1564dc9d32f54f20b5ffba7db96 +size 189507909 diff --git a/lib/infer_pack/__pycache__/attentions.cpython-310.pyc b/lib/infer_pack/__pycache__/attentions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8449e194fefec43e1b0afe0af05ca18572584aac Binary files /dev/null and b/lib/infer_pack/__pycache__/attentions.cpython-310.pyc differ diff --git a/lib/infer_pack/__pycache__/commons.cpython-310.pyc b/lib/infer_pack/__pycache__/commons.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..20fce537a6e0448755f6a06aeb1995241eb7d15f Binary files /dev/null and b/lib/infer_pack/__pycache__/commons.cpython-310.pyc differ diff --git a/lib/infer_pack/__pycache__/models.cpython-310.pyc b/lib/infer_pack/__pycache__/models.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9d46ea578021180ad17cf0200dd2251048fff0ca Binary files /dev/null and b/lib/infer_pack/__pycache__/models.cpython-310.pyc differ diff --git a/lib/infer_pack/__pycache__/modules.cpython-310.pyc b/lib/infer_pack/__pycache__/modules.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..51565c81478f93cf86f6cfba1c421f48013b6e5b Binary files /dev/null and b/lib/infer_pack/__pycache__/modules.cpython-310.pyc differ diff --git a/lib/infer_pack/__pycache__/transforms.cpython-310.pyc b/lib/infer_pack/__pycache__/transforms.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..846fbb824c3dc8d3b24997fc34ba6497dea6a335 Binary files /dev/null and b/lib/infer_pack/__pycache__/transforms.cpython-310.pyc differ diff --git a/lib/infer_pack/attentions.py b/lib/infer_pack/attentions.py new file mode 100644 index 0000000000000000000000000000000000000000..da4937f7d6ba428fe3d74d3e228056abe7cb998d --- /dev/null +++ b/lib/infer_pack/attentions.py @@ -0,0 +1,253 @@ +import copy +import math +import numpy as np +import torch +from torch 
import nn +from torch.nn import functional as F +from lib.infer_pack import commons +from lib.infer_pack import modules +from lib.infer_pack.modules import LayerNorm + +class Encoder(nn.Module): + def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0.0, window_size=10, **kwargs): + super().__init__() + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.window_size = window_size + self.drop = nn.Dropout(p_dropout) + self.attn_layers = nn.ModuleList() + self.norm_layers_1 = nn.ModuleList() + self.ffn_layers = nn.ModuleList() + self.norm_layers_2 = nn.ModuleList() + for _ in range(self.n_layers): + self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size)) + self.norm_layers_1.append(LayerNorm(hidden_channels)) + self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout)) + self.norm_layers_2.append(LayerNorm(hidden_channels)) + + def forward(self, x, x_mask): + attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) + x = x * x_mask + for i in range(self.n_layers): + y = self.attn_layers[i](x, x, attn_mask) + y = self.drop(y) + x = self.norm_layers_1[i](x + y) + y = self.ffn_layers[i](x, x_mask) + y = self.drop(y) + x = self.norm_layers_2[i](x + y) + x = x * x_mask + return x + +class Decoder(nn.Module): + def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0.0, proximal_bias=False, proximal_init=True, **kwargs): + super().__init__() + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.proximal_bias = proximal_bias + self.proximal_init = proximal_init + self.drop = nn.Dropout(p_dropout) + self.self_attn_layers = nn.ModuleList() + self.norm_layers_0 = nn.ModuleList() + self.encdec_attn_layers = nn.ModuleList() + self.norm_layers_1 = nn.ModuleList() + self.ffn_layers = nn.ModuleList() + self.norm_layers_2 = nn.ModuleList() + for _ in range(self.n_layers): + self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init)) + self.norm_layers_0.append(LayerNorm(hidden_channels)) + self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout)) + self.norm_layers_1.append(LayerNorm(hidden_channels)) + self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True)) + self.norm_layers_2.append(LayerNorm(hidden_channels)) + + def forward(self, x, x_mask, h, h_mask): + self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype) + encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1) + x = x * x_mask + for i in range(self.n_layers): + y = self.self_attn_layers[i](x, x, self_attn_mask) + y = self.drop(y) + x = self.norm_layers_0[i](x + y) + + y = self.encdec_attn_layers[i](x, h, encdec_attn_mask) + y = self.drop(y) + x = self.norm_layers_1[i](x + y) + + y = self.ffn_layers[i](x, x_mask) + y = self.drop(y) + x = self.norm_layers_2[i](x + y) + x = x * x_mask + return x + + +class MultiHeadAttention(nn.Module): + def __init__(self, channels, 
out_channels, n_heads, p_dropout=0.0, window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False): + super().__init__() + assert channels % n_heads == 0 + + self.channels = channels + self.out_channels = out_channels + self.n_heads = n_heads + self.p_dropout = p_dropout + self.window_size = window_size + self.heads_share = heads_share + self.block_length = block_length + self.proximal_bias = proximal_bias + self.proximal_init = proximal_init + self.attn = None + + self.k_channels = channels // n_heads + self.conv_q = nn.Conv1d(channels, channels, 1) + self.conv_k = nn.Conv1d(channels, channels, 1) + self.conv_v = nn.Conv1d(channels, channels, 1) + self.conv_o = nn.Conv1d(channels, out_channels, 1) + self.drop = nn.Dropout(p_dropout) + + if window_size is not None: + n_heads_rel = 1 if heads_share else n_heads + rel_stddev = self.k_channels**-0.5 + self.emb_rel_k = nn.Parameter( + torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) + * rel_stddev + ) + self.emb_rel_v = nn.Parameter( + torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) + * rel_stddev + ) + + nn.init.xavier_uniform_(self.conv_q.weight) + nn.init.xavier_uniform_(self.conv_k.weight) + nn.init.xavier_uniform_(self.conv_v.weight) + if proximal_init: + with torch.no_grad(): + self.conv_k.weight.copy_(self.conv_q.weight) + self.conv_k.bias.copy_(self.conv_q.bias) + + def forward(self, x, c, attn_mask=None): + q = self.conv_q(x) + k = self.conv_k(c) + v = self.conv_v(c) + + x, self.attn = self.attention(q, k, v, mask=attn_mask) + + x = self.conv_o(x) + return x + + def attention(self, query, key, value, mask=None): + b, d, t_s, t_t = (*key.size(), query.size(2)) + query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3) + key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) + value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) + + scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1)) + if self.window_size is not None: + assert ( + t_s == t_t + ), "Relative attention is only available for self-attention." + key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s) + rel_logits = self._matmul_with_relative_keys(query / math.sqrt(self.k_channels), key_relative_embeddings) + scores_local = self._relative_position_to_absolute_position(rel_logits) + scores = scores + scores_local + if self.proximal_bias: + assert t_s == t_t, "Proximal bias is only available for self-attention." + scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype) + if mask is not None: + scores = scores.masked_fill(mask == 0, -1e4) + if self.block_length is not None: + assert (t_s == t_t), "Local attention is only available for self-attention." 
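+            # Annotation (not part of the original commit): the band mask built
+            # below keeps scores within block_length steps of the diagonal, e.g.
+            # torch.ones(4, 4).triu(-1).tril(1) zeroes everything more than one
+            # step away, giving windowed (block-local) self-attention.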
+ block_mask = ( + torch.ones_like(scores) + .triu(-self.block_length) + .tril(self.block_length) + ) + scores = scores.masked_fill(block_mask == 0, -1e4) + p_attn = F.softmax(scores, dim=-1) + p_attn = self.drop(p_attn) + output = torch.matmul(p_attn, value) + if self.window_size is not None: + relative_weights = self._absolute_position_to_relative_position(p_attn) + value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s) + output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings) + output = (output.transpose(2, 3).contiguous().view(b, d, t_t)) + return output, p_attn + + def _matmul_with_relative_values(self, x, y): return torch.matmul(x, y.unsqueeze(0)) + def _matmul_with_relative_keys(self, x, y): return torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) + + def _get_relative_embeddings(self, relative_embeddings, length): + max_relative_position = 2 * self.window_size + 1 + pad_length = max(length - (self.window_size + 1), 0) + slice_start_position = max((self.window_size + 1) - length, 0) + slice_end_position = slice_start_position + 2 * length - 1 + if pad_length > 0:padded_relative_embeddings = F.pad(relative_embeddings, commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]])) + else: + padded_relative_embeddings = relative_embeddings + return padded_relative_embeddings[:, slice_start_position:slice_end_position] + + def _relative_position_to_absolute_position(self, x): + batch, heads, length, _ = x.size() + x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]])) + x_flat = x.view([batch, heads, length * 2 * length]) + x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]])) + + return x_flat.view([batch, heads, length + 1, 2 * length - 1])[:, :, :length, length - 1 :] + + def _absolute_position_to_relative_position(self, x): + batch, heads, length, _ = x.size() + x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]])) + x_flat = x.view([batch, heads, length**2 + length * (length - 1)]) + x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]])) + return x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:] + + def _attention_bias_proximal(self, length): + r = torch.arange(length, dtype=torch.float32) + diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) + return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0) + +class FFN(nn.Module): + def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0.0, activation=None, causal=False): + super().__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.filter_channels = filter_channels + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.activation = activation + self.causal = causal + self.padding = self._causal_padding if causal else self._same_padding + self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size) + self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size) + self.drop = nn.Dropout(p_dropout) + + def forward(self, x, x_mask): + x = self.conv_1(self.padding(x * x_mask)) + if self.activation == "gelu": x = x * torch.sigmoid(1.702 * x) + else: x = torch.relu(x) + x = self.drop(x) + x = self.conv_2(self.padding(x * x_mask)) + return x * x_mask + + def _causal_padding(self, x): + if self.kernel_size == 1: return x + pad_l = self.kernel_size - 1 + pad_r = 0 + return self._extracted_from__same_padding_5(pad_l, pad_r, x) + + def 
_same_padding(self, x): + if self.kernel_size == 1: return x + pad_l = (self.kernel_size - 1) // 2 + pad_r = self.kernel_size // 2 + return self._extracted_from__same_padding_5(pad_l, pad_r, x) + + def _extracted_from__same_padding_5(self, pad_l, pad_r, x): + padding = [[0, 0], [0, 0], [pad_l, pad_r]] + x = F.pad(x, commons.convert_pad_shape(padding)) + return x \ No newline at end of file diff --git a/lib/infer_pack/commons.py b/lib/infer_pack/commons.py new file mode 100644 index 0000000000000000000000000000000000000000..29d5c1c54e35f96a8548bac4b503af7ce39e0d5a --- /dev/null +++ b/lib/infer_pack/commons.py @@ -0,0 +1,112 @@ +import math +import numpy as np +import torch +from torch import nn +from torch.nn import functional as F + +def init_weights(m, mean=0.0, std=0.01): + classname = m.__class__.__name__ + if "Conv" in classname: m.weight.data.normal_(mean, std) + +def get_padding(kernel_size, dilation=1): + return (kernel_size * dilation - dilation) // 2 + +def convert_pad_shape(pad_shape): + l = pad_shape[::-1] + return [item for sublist in l for item in sublist] + +def kl_divergence(m_p, logs_p, m_q, logs_q): + kl = logs_q - logs_p - 0.5 + kl += 0.5 * (torch.exp(2.0 * logs_p) + (m_p - m_q) ** 2) * torch.exp(-2.0 * logs_q) + return kl + +def rand_gumbel(shape): + return -torch.log(-torch.log(torch.rand(shape) + 1e-5)) + +def rand_gumbel_like(x): + return rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device) + +def slice_segments(x, ids_str, segment_size=4, slice_dim=2): + if slice_dim == 1: ret = torch.zeros_like(x[:, :segment_size]) + else: ret = torch.zeros_like(x[:, :, :segment_size]) + + for i in range(x.size(0)): + idx_str = ids_str[i] + idx_end = idx_str + segment_size + ret[i] = x[i, ..., idx_str:idx_end] + return ret + +def rand_slice_segments(x, x_lengths=None, segment_size=4): + b, d, t = x.size() + + if x_lengths is None: x_lengths = t + + ids_str_max = x_lengths - segment_size + 1 + ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long) + ret = slice_segments(x, ids_str, segment_size) + return ret, ids_str + +def get_timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4): + position = torch.arange(length, dtype=torch.float) + num_timescales = channels // 2 + log_timescale_increment = math.log(float(max_timescale) / float(min_timescale)) / (num_timescales - 1) + inv_timescales = min_timescale * torch.exp(torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment) + scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1) + signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0) + signal = F.pad(signal, [0, 0, 0, channels % 2]) + signal = signal.view(1, channels, length) + return signal + +def apply_timing_signal_1d(x, operation='add', min_timescale=1.0, max_timescale=1.0e4): + b, channels, length = x.size() + signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) + signal = signal.to(dtype=x.dtype, device=x.device) + + if operation == 'add': return x + signal + elif operation == 'cat': return torch.cat([x, signal], axis=1) + +def subsequent_mask(length): + return torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0) + +@torch.jit.script +def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): + n_channels_int = n_channels[0] + in_act = input_a + input_b + t_act = torch.tanh(in_act[:, :n_channels_int, :]) + s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) + return t_act * s_act + +def shift_1d(x): + x = F.pad(x, convert_pad_shape([[0, 
0], [0, 0], [1, 0]]))[:, :, :-1] + return x + +def sequence_mask(length, max_length=None): + if max_length is None: max_length = length.max() + x = torch.arange(max_length, dtype=length.dtype, device=length.device) + return x.unsqueeze(0) < length.unsqueeze(1) + +def generate_path(duration, mask): + device = duration.device + b, _, t_y, t_x = mask.shape + cum_duration = torch.cumsum(duration, -1) + cum_duration_flat = cum_duration.view(b * t_x) + path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype) + path = path.view(b, t_x, t_y) + path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1] + path = path.unsqueeze(1).transpose(2, 3) * mask + return path + +def clip_grad_value_(parameters, clip_value, norm_type=2): + if isinstance(parameters, torch.Tensor): parameters = [parameters] + parameters = list(filter(lambda p: p.grad is not None, parameters)) + norm_type = float(norm_type) + + if clip_value is not None: clip_value = float(clip_value) + + total_norm = 0 + for p in parameters: + param_norm = p.grad.data.norm(norm_type) + total_norm += param_norm.item() ** norm_type + if clip_value is not None: p.grad.data.clamp_(min=-clip_value, max=clip_value) + total_norm = total_norm ** (1.0 / norm_type) + return total_norm \ No newline at end of file diff --git a/lib/infer_pack/models.py b/lib/infer_pack/models.py new file mode 100644 index 0000000000000000000000000000000000000000..fbcac8deb5fe6fe2c77752ca5b10459ce71ea43b --- /dev/null +++ b/lib/infer_pack/models.py @@ -0,0 +1,711 @@ +import math +import torch +from torch import nn +from torch.nn import functional as F +from lib.infer_pack import modules +from lib.infer_pack import attentions +from lib.infer_pack import commons +from lib.infer_pack.commons import init_weights, get_padding +from torch.nn import Conv1d, ConvTranspose1d, Conv2d +from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm +from lib.infer_pack.commons import init_weights +import numpy as np +from lib.infer_pack import commons + +class TextEncoder256(nn.Module): + def __init__(self, out_channels, hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout, f0=True): + super().__init__() + self.out_channels = out_channels + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.emb_phone = nn.Linear(256, hidden_channels) + self.lrelu = nn.LeakyReLU(0.1, inplace=True) + if f0 == True: self.emb_pitch = nn.Embedding(256, hidden_channels) + self.encoder = attentions.Encoder(hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout) + self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) + + def forward(self, phone, pitch, lengths): + if pitch is None: x = self.emb_phone(phone) + else: x = self.emb_phone(phone) + self.emb_pitch(pitch) + x = x * math.sqrt(self.hidden_channels) + x = self.lrelu(x) + x = torch.transpose(x, 1, -1) + x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(x.dtype) + x = self.encoder(x * x_mask, x_mask) + stats = self.proj(x) * x_mask + m, logs = torch.split(stats, self.out_channels, dim=1) + return m, logs, x_mask + +class TextEncoder768(nn.Module): + def __init__(self, out_channels, hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout, f0=True): + super().__init__() + self.out_channels = out_channels + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + 
self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.emb_phone = nn.Linear(768, hidden_channels) + self.lrelu = nn.LeakyReLU(0.1, inplace=True) + if f0 == True: self.emb_pitch = nn.Embedding(256, hidden_channels) + self.encoder = attentions.Encoder(hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout) + self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) + + def forward(self, phone, pitch, lengths): + if pitch is None: x = self.emb_phone(phone) + else: x = self.emb_phone(phone) + self.emb_pitch(pitch) + x = x * math.sqrt(self.hidden_channels) + x = self.lrelu(x) + x = torch.transpose(x, 1, -1) + x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(x.dtype) + x = self.encoder(x * x_mask, x_mask) + stats = self.proj(x) * x_mask + m, logs = torch.split(stats, self.out_channels, dim=1) + return m, logs, x_mask + +class ResidualCouplingBlock(nn.Module): + def __init__(self, channels, hidden_channels, kernel_size, dilation_rate, n_layers, n_flows=4, gin_channels=0): + super().__init__() + self.channels = channels + self.hidden_channels = hidden_channels + self.kernel_size = kernel_size + self.dilation_rate = dilation_rate + self.n_layers = n_layers + self.n_flows = n_flows + self.gin_channels = gin_channels + self.flows = nn.ModuleList() + for _ in range(n_flows): + self.flows.append(modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True)) + self.flows.append(modules.Flip()) + + def forward(self, x, x_mask, g=None, reverse=False): + if not reverse: + for flow in self.flows: x, _ = flow(x, x_mask, g=g, reverse=reverse) + else: + for flow in reversed(self.flows): x = flow(x, x_mask, g=g, reverse=reverse) + return x + + def remove_weight_norm(self): + for i in range(self.n_flows): self.flows[i * 2].remove_weight_norm() + +class PosteriorEncoder(nn.Module): + def __init__( + self, + in_channels, + out_channels, + hidden_channels, + kernel_size, + dilation_rate, + n_layers, + gin_channels=0, + ): + super().__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.hidden_channels = hidden_channels + self.kernel_size = kernel_size + self.dilation_rate = dilation_rate + self.n_layers = n_layers + self.gin_channels = gin_channels + + self.pre = nn.Conv1d(in_channels, hidden_channels, 1) + self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels) + self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) + + def forward(self, x, x_lengths, g=None): + x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) + x = self.pre(x) * x_mask + x = self.enc(x, x_mask, g=g) + stats = self.proj(x) * x_mask + m, logs = torch.split(stats, self.out_channels, dim=1) + z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask + return z, m, logs, x_mask + + def remove_weight_norm(self): + self.enc.remove_weight_norm() + +class Generator(torch.nn.Module): + def __init__( + self, + initial_channel, + resblock, + resblock_kernel_sizes, + resblock_dilation_sizes, + upsample_rates, + upsample_initial_channel, + upsample_kernel_sizes, + gin_channels=0, + ): + super(Generator, self).__init__() + self.num_kernels = len(resblock_kernel_sizes) + self.num_upsamples = len(upsample_rates) + self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3) + resblock = modules.ResBlock1 if resblock == "1" else 
modules.ResBlock2 + + self.ups = nn.ModuleList() + for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): self.ups.append(weight_norm(ConvTranspose1d(upsample_initial_channel // (2**i), upsample_initial_channel // (2 ** (i + 1)), k, u, padding=(k - u) // 2))) + + self.resblocks = nn.ModuleList() + for i in range(len(self.ups)): + ch = upsample_initial_channel // (2 ** (i + 1)) + for k, d in zip(resblock_kernel_sizes, resblock_dilation_sizes): self.resblocks.append(resblock(ch, k, d)) + + self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) + self.ups.apply(init_weights) + + if gin_channels != 0: self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) + + def forward(self, x, g=None): + x = self.conv_pre(x) + if g is not None: x = x + self.cond(g) + + for i in range(self.num_upsamples): + x = F.leaky_relu(x, modules.LRELU_SLOPE) + x = self.ups[i](x) + xs = None + for j in range(self.num_kernels): + if xs is None: xs = self.resblocks[i * self.num_kernels + j](x) + else: xs += self.resblocks[i * self.num_kernels + j](x) + x = xs / self.num_kernels + x = F.leaky_relu(x) + x = self.conv_post(x) + x = torch.tanh(x) + return x + + def remove_weight_norm(self): + for l in self.ups: remove_weight_norm(l) + for l in self.resblocks: l.remove_weight_norm() + +class SineGen(torch.nn.Module): + def __init__(self, samp_rate, harmonic_num=0, sine_amp=0.1, noise_std=0.003, voiced_threshold=0): + super(SineGen, self).__init__() + self.sine_amp = sine_amp + self.noise_std = noise_std + self.harmonic_num = harmonic_num + self.dim = self.harmonic_num + 1 + self.sampling_rate = samp_rate + self.voiced_threshold = voiced_threshold + + def _f02uv(self, f0): + uv = torch.ones_like(f0) + uv = uv * (f0 > self.voiced_threshold) + return uv + + def forward(self, f0, upp): + with torch.no_grad(): + f0 = f0[:, None].transpose(1, 2) + f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device) + f0_buf[:, :, 0] = f0[:, :, 0] + for idx in np.arange(self.harmonic_num): f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * (idx + 2) + rad_values = (f0_buf / self.sampling_rate) % 1 + rand_ini = torch.rand(f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device) + rand_ini[:, 0] = 0 + rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini + tmp_over_one = torch.cumsum(rad_values, 1) + tmp_over_one *= upp + tmp_over_one = F.interpolate(tmp_over_one.transpose(2, 1), scale_factor=upp, mode="linear", align_corners=True).transpose(2, 1) + rad_values = F.interpolate(rad_values.transpose(2, 1), scale_factor=upp, mode="nearest").transpose(2, 1) + tmp_over_one %= 1 + tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0 + cumsum_shift = torch.zeros_like(rad_values) + cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0 + sine_waves = torch.sin(torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi) + sine_waves = sine_waves * self.sine_amp + uv = self._f02uv(f0) + uv = F.interpolate(uv.transpose(2, 1), scale_factor=upp, mode="nearest").transpose(2, 1) + noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3 + noise = noise_amp * torch.randn_like(sine_waves) + sine_waves = sine_waves * uv + noise + return sine_waves, uv, noise + + +class SourceModuleHnNSF(torch.nn.Module): + def __init__( + self, + sampling_rate, + harmonic_num=0, + sine_amp=0.1, + add_noise_std=0.003, + voiced_threshod=0, + is_half=True, + ): + super(SourceModuleHnNSF, self).__init__() + + self.sine_amp = sine_amp + self.noise_std = add_noise_std + self.is_half = is_half + self.l_sin_gen = 
SineGen(sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod) + self.l_linear = torch.nn.Linear(harmonic_num + 1, 1) + self.l_tanh = torch.nn.Tanh() + + def forward(self, x, upp=None): + sine_wavs, uv, _ = self.l_sin_gen(x, upp) + if self.is_half: sine_wavs = sine_wavs.half() + sine_merge = self.l_tanh(self.l_linear(sine_wavs)) + return sine_merge, None, None + + +class GeneratorNSF(torch.nn.Module): + def __init__( + self, + initial_channel, + resblock, + resblock_kernel_sizes, + resblock_dilation_sizes, + upsample_rates, + upsample_initial_channel, + upsample_kernel_sizes, + gin_channels, + sr, + is_half=False, + ): + super(GeneratorNSF, self).__init__() + self.num_kernels = len(resblock_kernel_sizes) + self.num_upsamples = len(upsample_rates) + self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates)) + self.m_source = SourceModuleHnNSF(sampling_rate=sr, harmonic_num=0, is_half=is_half) + self.noise_convs = nn.ModuleList() + self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3) + resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 + + self.ups = nn.ModuleList() + for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): + c_cur = upsample_initial_channel // (2 ** (i + 1)) + self.ups.append( + weight_norm(ConvTranspose1d(upsample_initial_channel // (2**i), upsample_initial_channel // (2 ** (i + 1)), k, u, padding=(k - u) // 2))) + if i + 1 < len(upsample_rates): + stride_f0 = np.prod(upsample_rates[i + 1 :]) + self.noise_convs.append(Conv1d(1, c_cur, kernel_size=stride_f0 * 2, stride=stride_f0, padding=stride_f0 // 2)) + else: self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1)) + + self.resblocks = nn.ModuleList() + for i in range(len(self.ups)): + ch = upsample_initial_channel // (2 ** (i + 1)) + for k, d in zip(resblock_kernel_sizes, resblock_dilation_sizes): self.resblocks.append(resblock(ch, k, d)) + + self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) + self.ups.apply(init_weights) + + if gin_channels != 0: self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) + + self.upp = np.prod(upsample_rates) + + def forward(self, x, f0, g=None): + har_source, noi_source, uv = self.m_source(f0, self.upp) + har_source = har_source.transpose(1, 2) + x = self.conv_pre(x) + if g is not None: x = x + self.cond(g) + + for i in range(self.num_upsamples): + x = F.leaky_relu(x, modules.LRELU_SLOPE) + x = self.ups[i](x) + x_source = self.noise_convs[i](har_source) + x = x + x_source + xs = None + for j in range(self.num_kernels): + if xs is None: xs = self.resblocks[i * self.num_kernels + j](x) + else: xs += self.resblocks[i * self.num_kernels + j](x) + x = xs / self.num_kernels + x = F.leaky_relu(x) + x = self.conv_post(x) + x = torch.tanh(x) + return x + + def remove_weight_norm(self): + for l in self.ups: remove_weight_norm(l) + for l in self.resblocks: l.remove_weight_norm() + +sr2sr = { + "32k": 32000, + "40k": 40000, + "48k": 48000, +} + +class SynthesizerTrnMs256NSFsid(nn.Module): + def __init__( + self, + spec_channels, + segment_size, + inter_channels, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout, + resblock, + resblock_kernel_sizes, + resblock_dilation_sizes, + upsample_rates, + upsample_initial_channel, + upsample_kernel_sizes, + spk_embed_dim, + gin_channels, + sr, + **kwargs + ): + super().__init__() + if type(sr) == type("strr"): sr = sr2sr[sr] + self.spec_channels = spec_channels + self.inter_channels = inter_channels + 
self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.resblock = resblock + self.resblock_kernel_sizes = resblock_kernel_sizes + self.resblock_dilation_sizes = resblock_dilation_sizes + self.upsample_rates = upsample_rates + self.upsample_initial_channel = upsample_initial_channel + self.upsample_kernel_sizes = upsample_kernel_sizes + self.segment_size = segment_size + self.gin_channels = gin_channels + self.spk_embed_dim = spk_embed_dim + self.enc_p = TextEncoder256(inter_channels, hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout) + self.dec = GeneratorNSF(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels, sr=sr, is_half=kwargs["is_half"]) + self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels) + self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels) + self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) + + def remove_weight_norm(self): + self.dec.remove_weight_norm() + self.flow.remove_weight_norm() + self.enc_q.remove_weight_norm() + + def forward(self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds): + g = self.emb_g(ds).unsqueeze(-1) + m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) + z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) + z_p = self.flow(z, y_mask, g=g) + z_slice, ids_slice = commons.rand_slice_segments(z, y_lengths, self.segment_size) + pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size) + o = self.dec(z_slice, pitchf, g=g) + return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) + + def infer(self, phone, phone_lengths, pitch, nsff0, sid, max_len=None): + g = self.emb_g(sid).unsqueeze(-1) + m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) + z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask + z = self.flow(z_p, x_mask, g=g, reverse=True) + o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g) + return o, x_mask, (z, z_p, m_p, logs_p) + + +class SynthesizerTrnMs768NSFsid(nn.Module): + def __init__( + self, + spec_channels, + segment_size, + inter_channels, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout, + resblock, + resblock_kernel_sizes, + resblock_dilation_sizes, + upsample_rates, + upsample_initial_channel, + upsample_kernel_sizes, + spk_embed_dim, + gin_channels, + sr, + **kwargs + ): + super().__init__() + if type(sr) == type("strr"): sr = sr2sr[sr] + self.spec_channels = spec_channels + self.inter_channels = inter_channels + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.resblock = resblock + self.resblock_kernel_sizes = resblock_kernel_sizes + self.resblock_dilation_sizes = resblock_dilation_sizes + self.upsample_rates = upsample_rates + self.upsample_initial_channel = upsample_initial_channel + self.upsample_kernel_sizes = upsample_kernel_sizes + self.segment_size = segment_size + self.gin_channels = gin_channels + self.spk_embed_dim = spk_embed_dim + self.enc_p = TextEncoder768(inter_channels, hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout) + 
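+        # Annotation (not part of the original commit): TextEncoder768 projects the
+        # 768-dim HuBERT features used by v2 models (nn.Linear(768, hidden_channels)),
+        # while the SynthesizerTrnMs256* classes above take the 256-dim v1 projection.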
self.dec = GeneratorNSF(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels, sr=sr, is_half=kwargs["is_half"]) + self.enc_q = PosteriorEncoder(spec_channels,inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels) + self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels) + self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) + + def remove_weight_norm(self): + self.dec.remove_weight_norm() + self.flow.remove_weight_norm() + self.enc_q.remove_weight_norm() + + def forward(self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds): + g = self.emb_g(ds).unsqueeze(-1) + m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) + z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) + z_p = self.flow(z, y_mask, g=g) + z_slice, ids_slice = commons.rand_slice_segments(z, y_lengths, self.segment_size) + pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size) + o = self.dec(z_slice, pitchf, g=g) + return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) + + def infer(self, phone, phone_lengths, pitch, nsff0, sid, max_len=None): + g = self.emb_g(sid).unsqueeze(-1) + m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) + z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask + z = self.flow(z_p, x_mask, g=g, reverse=True) + o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g) + return o, x_mask, (z, z_p, m_p, logs_p) + +class SynthesizerTrnMs256NSFsid_nono(nn.Module): + def __init__( + self, + spec_channels, + segment_size, + inter_channels, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout, + resblock, + resblock_kernel_sizes, + resblock_dilation_sizes, + upsample_rates, + upsample_initial_channel, + upsample_kernel_sizes, + spk_embed_dim, + gin_channels, + sr=None, + **kwargs + ): + super().__init__() + self.spec_channels = spec_channels + self.inter_channels = inter_channels + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.resblock = resblock + self.resblock_kernel_sizes = resblock_kernel_sizes + self.resblock_dilation_sizes = resblock_dilation_sizes + self.upsample_rates = upsample_rates + self.upsample_initial_channel = upsample_initial_channel + self.upsample_kernel_sizes = upsample_kernel_sizes + self.segment_size = segment_size + self.gin_channels = gin_channels + self.spk_embed_dim = spk_embed_dim + self.enc_p = TextEncoder256(inter_channels, hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout, f0=False) + self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels) + self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels) + self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels) + self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) + + def remove_weight_norm(self): + self.dec.remove_weight_norm() + self.flow.remove_weight_norm() + self.enc_q.remove_weight_norm() + + def forward(self, phone, phone_lengths, y, y_lengths, ds): + g = self.emb_g(ds).unsqueeze(-1) + m_p, logs_p, x_mask = self.enc_p(phone, 
None, phone_lengths) + z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) + z_p = self.flow(z, y_mask, g=g) + z_slice, ids_slice = commons.rand_slice_segments(z, y_lengths, self.segment_size) + o = self.dec(z_slice, g=g) + return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) + + def infer(self, phone, phone_lengths, sid, max_len=None): + g = self.emb_g(sid).unsqueeze(-1) + m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) + z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask + z = self.flow(z_p, x_mask, g=g, reverse=True) + o = self.dec((z * x_mask)[:, :, :max_len], g=g) + return o, x_mask, (z, z_p, m_p, logs_p) + +class SynthesizerTrnMs768NSFsid_nono(nn.Module): + def __init__( + self, + spec_channels, + segment_size, + inter_channels, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout, + resblock, + resblock_kernel_sizes, + resblock_dilation_sizes, + upsample_rates, + upsample_initial_channel, + upsample_kernel_sizes, + spk_embed_dim, + gin_channels, + sr=None, + **kwargs + ): + super().__init__() + self.spec_channels = spec_channels + self.inter_channels = inter_channels + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.resblock = resblock + self.resblock_kernel_sizes = resblock_kernel_sizes + self.resblock_dilation_sizes = resblock_dilation_sizes + self.upsample_rates = upsample_rates + self.upsample_initial_channel = upsample_initial_channel + self.upsample_kernel_sizes = upsample_kernel_sizes + self.segment_size = segment_size + self.gin_channels = gin_channels + self.spk_embed_dim = spk_embed_dim + self.enc_p = TextEncoder768(inter_channels, hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout, f0=False) + self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels) + self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels) + self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels) + self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) + + def remove_weight_norm(self): + self.dec.remove_weight_norm() + self.flow.remove_weight_norm() + self.enc_q.remove_weight_norm() + + def forward(self, phone, phone_lengths, y, y_lengths, ds): + g = self.emb_g(ds).unsqueeze(-1) + m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) + z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) + z_p = self.flow(z, y_mask, g=g) + z_slice, ids_slice = commons.rand_slice_segments(z, y_lengths, self.segment_size) + o = self.dec(z_slice, g=g) + return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) + + def infer(self, phone, phone_lengths, sid, max_len=None): + g = self.emb_g(sid).unsqueeze(-1) + m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) + z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask + z = self.flow(z_p, x_mask, g=g, reverse=True) + o = self.dec((z * x_mask)[:, :, :max_len], g=g) + return o, x_mask, (z, z_p, m_p, logs_p) + +class MultiPeriodDiscriminator(torch.nn.Module): + def __init__(self, use_spectral_norm=False): + super(MultiPeriodDiscriminator, self).__init__() + periods = [2, 3, 5, 7, 11, 17] + discs = 
[DiscriminatorS(use_spectral_norm=use_spectral_norm)] + discs += [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods] + self.discriminators = nn.ModuleList(discs) + + def forward(self, y, y_hat): + y_d_rs = [] + y_d_gs = [] + fmap_rs = [] + fmap_gs = [] + for d in self.discriminators: + y_d_r, fmap_r = d(y) + y_d_g, fmap_g = d(y_hat) + y_d_rs.append(y_d_r) + y_d_gs.append(y_d_g) + fmap_rs.append(fmap_r) + fmap_gs.append(fmap_g) + + return y_d_rs, y_d_gs, fmap_rs, fmap_gs + +class MultiPeriodDiscriminatorV2(torch.nn.Module): + def __init__(self, use_spectral_norm=False): + super(MultiPeriodDiscriminatorV2, self).__init__() + periods = [2, 3, 5, 7, 11, 17, 23, 37] + + discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] + discs += [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods] + self.discriminators = nn.ModuleList(discs) + + def forward(self, y, y_hat): + y_d_rs = [] + y_d_gs = [] + fmap_rs = [] + fmap_gs = [] + for d in self.discriminators: + y_d_r, fmap_r = d(y) + y_d_g, fmap_g = d(y_hat) + y_d_rs.append(y_d_r) + y_d_gs.append(y_d_g) + fmap_rs.append(fmap_r) + fmap_gs.append(fmap_g) + + return y_d_rs, y_d_gs, fmap_rs, fmap_gs + +class DiscriminatorS(torch.nn.Module): + def __init__(self, use_spectral_norm=False): + super(DiscriminatorS, self).__init__() + norm_f = weight_norm if use_spectral_norm == False else spectral_norm + self.convs = nn.ModuleList([norm_f(Conv1d(1, 16, 15, 1, padding=7)), norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), norm_f(Conv1d(1024, 1024, 5, 1, padding=2))]) + self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) + + def forward(self, x): + fmap = [] + for l in self.convs: + x = l(x) + x = F.leaky_relu(x, modules.LRELU_SLOPE) + fmap.append(x) + x = self.conv_post(x) + fmap.append(x) + x = torch.flatten(x, 1, -1) + + return x, fmap + + +class DiscriminatorP(torch.nn.Module): + def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): + super(DiscriminatorP, self).__init__() + self.period = period + self.use_spectral_norm = use_spectral_norm + norm_f = weight_norm if use_spectral_norm == False else spectral_norm + self.convs = nn.ModuleList( + [ + norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), + norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), + norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), + norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), + norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))), + ] + ) + self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) + + def forward(self, x): + fmap = [] + b, c, t = x.shape + if t % self.period != 0: + n_pad = self.period - (t % self.period) + x = F.pad(x, (0, n_pad), "reflect") + t = t + n_pad + x = x.view(b, c, t // self.period, self.period) + + for l in self.convs: + x = l(x) + x = F.leaky_relu(x, modules.LRELU_SLOPE) + fmap.append(x) + x = self.conv_post(x) + fmap.append(x) + x = torch.flatten(x, 1, -1) + + return x, fmap \ No newline at end of file diff --git a/lib/infer_pack/models_onnx.py b/lib/infer_pack/models_onnx.py new file mode 100644 index 
0000000000000000000000000000000000000000..1bbfb69e458245dc7215dfce1daa38827a4a3069 --- /dev/null +++ b/lib/infer_pack/models_onnx.py @@ -0,0 +1,582 @@ +import math +import torch +import numpy as np +from torch import nn +from torch.nn import functional as F +from torch.nn import Conv1d, ConvTranspose1d, Conv2d +from lib.infer_pack import modules, attentions, commons +from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm +from lib.infer_pack.commons import init_weights, get_padding, sequence_mask + +class TextEncoder(nn.Module): + def __init__( + self, + input_dim, + out_channels, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout, + f0=True, + ): + super().__init__() + self.out_channels = out_channels + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.emb_phone = nn.Linear(input_dim, hidden_channels) + self.lrelu = nn.LeakyReLU(0.1, inplace=True) + + if f0: self.emb_pitch = nn.Embedding(256, hidden_channels) + self.encoder = attentions.Encoder(hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout) + self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) + + def forward(self, phone, pitch, lengths): + if pitch is None: x = self.emb_phone(phone) + else: x = self.emb_phone(phone) + self.emb_pitch(pitch) + x = x * math.sqrt(self.hidden_channels) + x = self.lrelu(x) + x = torch.transpose(x, 1, -1) + x_mask = torch.unsqueeze(sequence_mask(lengths, x.size(2)), 1).to(x.dtype) + x = self.encoder(x * x_mask, x_mask) + stats = self.proj(x) * x_mask + m, logs = torch.split(stats, self.out_channels, dim=1) + return m, logs, x_mask + +class TextEncoder768(nn.Module): + def __init__( + self, + out_channels, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout, + f0=True, + ): + super().__init__() + self.out_channels = out_channels + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.emb_phone = nn.Linear(768, hidden_channels) + self.lrelu = nn.LeakyReLU(0.1, inplace=True) + if f0: self.emb_pitch = nn.Embedding(256, hidden_channels) + self.encoder = attentions.Encoder(hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout) + self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) + + def forward(self, phone, pitch, lengths): + if pitch is None: x = self.emb_phone(phone) + else: x = self.emb_phone(phone) + self.emb_pitch(pitch) + x = x * math.sqrt(self.hidden_channels) + x = self.lrelu(x) + x = torch.transpose(x, 1, -1) + x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(x.dtype) + x = self.encoder(x * x_mask, x_mask) + stats = self.proj(x) * x_mask + + m, logs = torch.split(stats, self.out_channels, dim=1) + return m, logs, x_mask + +class ResidualCouplingBlock(nn.Module): + def __init__( + self, + channels, + hidden_channels, + kernel_size, + dilation_rate, + n_layers, + n_flows=4, + gin_channels=0, + ): + super().__init__() + self.channels = channels + self.hidden_channels = hidden_channels + self.kernel_size = kernel_size + 
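# config values are stored on the module; n_flows is read again by remove_weight_norm below + 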
self.dilation_rate = dilation_rate + self.n_layers = n_layers + self.n_flows = n_flows + self.gin_channels = gin_channels + + self.flows = nn.ModuleList() + for _ in range(n_flows): + self.flows.append( + modules.ResidualCouplingLayer( + channels, + hidden_channels, + kernel_size, + dilation_rate, + n_layers, + gin_channels=gin_channels, + mean_only=True, + ) + ) + self.flows.append(modules.Flip()) + + def forward(self, x, x_mask, g=None, reverse=False): + if not reverse: + for flow in self.flows: x, _ = flow(x, x_mask, g=g, reverse=reverse) + else: + for flow in reversed(self.flows): x = flow(x, x_mask, g=g, reverse=reverse) + return x + + def remove_weight_norm(self): + for i in range(self.n_flows): + self.flows[i * 2].remove_weight_norm() + +class PosteriorEncoder(nn.Module): + def __init__( + self, + in_channels, + out_channels, + hidden_channels, + kernel_size, + dilation_rate, + n_layers, + gin_channels=0, + ): + super().__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.hidden_channels = hidden_channels + self.kernel_size = kernel_size + self.dilation_rate = dilation_rate + self.n_layers = n_layers + self.gin_channels = gin_channels + + self.pre = nn.Conv1d(in_channels, hidden_channels, 1) + self.enc = modules.WN( + hidden_channels, + kernel_size, + dilation_rate, + n_layers, + gin_channels=gin_channels, + ) + self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) + + def forward(self, x, x_lengths, g=None): + x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to( + x.dtype + ) + x = self.pre(x) * x_mask + x = self.enc(x, x_mask, g=g) + stats = self.proj(x) * x_mask + m, logs = torch.split(stats, self.out_channels, dim=1) + z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask + return z, m, logs, x_mask + + def remove_weight_norm(self): + self.enc.remove_weight_norm() + +class Generator(torch.nn.Module): + def __init__( + self, + initial_channel, + resblock, + resblock_kernel_sizes, + resblock_dilation_sizes, + upsample_rates, + upsample_initial_channel, + upsample_kernel_sizes, + gin_channels=0, + ): + super(Generator, self).__init__() + self.num_kernels = len(resblock_kernel_sizes) + self.num_upsamples = len(upsample_rates) + self.conv_pre = Conv1d( + initial_channel, upsample_initial_channel, 7, 1, padding=3 + ) + resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 + + self.ups = nn.ModuleList() + for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): + self.ups.append( + weight_norm( + ConvTranspose1d( + upsample_initial_channel // (2**i), + upsample_initial_channel // (2 ** (i + 1)), + k, + u, + padding=(k - u) // 2, + ) + ) + ) + + self.resblocks = nn.ModuleList() + for i in range(len(self.ups)): + ch = upsample_initial_channel // (2 ** (i + 1)) + for k, d in zip(resblock_kernel_sizes, resblock_dilation_sizes): self.resblocks.append(resblock(ch, k, d)) + + self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) + self.ups.apply(init_weights) + + if gin_channels != 0: self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) + + def forward(self, x, g=None): + x = self.conv_pre(x) + if g is not None: x = x + self.cond(g) + + for i in range(self.num_upsamples): + x = F.leaky_relu(x, modules.LRELU_SLOPE) + x = self.ups[i](x) + xs = None + for j in range(self.num_kernels): + if xs is None: xs = self.resblocks[i * self.num_kernels + j](x) + else: xs += self.resblocks[i * self.num_kernels + j](x) + x = xs / self.num_kernels + x = F.leaky_relu(x) + x = self.conv_post(x) + 
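# tanh squashes the decoder output into the [-1, 1] waveform range + 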
x = torch.tanh(x) + + return x + + def remove_weight_norm(self): + for l in self.ups: remove_weight_norm(l) + for l in self.resblocks: l.remove_weight_norm() + +class SineGen(torch.nn.Module): + def __init__( + self, + samp_rate, + harmonic_num=0, + sine_amp=0.1, + noise_std=0.003, + voiced_threshold=0, + ): + super(SineGen, self).__init__() + self.sine_amp = sine_amp + self.noise_std = noise_std + self.harmonic_num = harmonic_num + self.dim = self.harmonic_num + 1 + self.sampling_rate = samp_rate + self.voiced_threshold = voiced_threshold + + def _f02uv(self, f0): + uv = torch.ones_like(f0) + uv = uv * (f0 > self.voiced_threshold) + return uv + + def forward(self, f0, upp): + with torch.no_grad(): + f0 = f0[:, None].transpose(1, 2) + f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device) + f0_buf[:, :, 0] = f0[:, :, 0] + for idx in np.arange(self.harmonic_num): f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * (idx + 2) + rad_values = (f0_buf / self.sampling_rate) % 1 + rand_ini = torch.rand(f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device) + rand_ini[:, 0] = 0 + rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini + tmp_over_one = torch.cumsum(rad_values, 1) + tmp_over_one *= upp + tmp_over_one = F.interpolate(tmp_over_one.transpose(2, 1), scale_factor=upp, mode="linear", align_corners=True).transpose(2, 1) + rad_values = F.interpolate(rad_values.transpose(2, 1), scale_factor=upp, mode="nearest").transpose(2, 1) + tmp_over_one %= 1 + tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0 + cumsum_shift = torch.zeros_like(rad_values) + cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0 + sine_waves = torch.sin(torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi) + sine_waves = sine_waves * self.sine_amp + uv = self._f02uv(f0) + uv = F.interpolate(uv.transpose(2, 1), scale_factor=upp, mode="nearest").transpose(2, 1) + noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3 + noise = noise_amp * torch.randn_like(sine_waves) + sine_waves = sine_waves * uv + noise + return sine_waves, uv, noise + +class SourceModuleHnNSF(torch.nn.Module): + def __init__( + self, + sampling_rate, + harmonic_num=0, + sine_amp=0.1, + add_noise_std=0.003, + voiced_threshod=0, + is_half=True, + ): + super(SourceModuleHnNSF, self).__init__() + + self.sine_amp = sine_amp + self.noise_std = add_noise_std + self.is_half = is_half + self.l_sin_gen = SineGen(sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod) + self.l_linear = torch.nn.Linear(harmonic_num + 1, 1) + self.l_tanh = torch.nn.Tanh() + + def forward(self, x, upp=None): + sine_wavs, uv, _ = self.l_sin_gen(x, upp) + if self.is_half: sine_wavs = sine_wavs.half() + sine_merge = self.l_tanh(self.l_linear(sine_wavs)) + return sine_merge, None, None + + +class GeneratorNSF(torch.nn.Module): + def __init__( + self, + initial_channel, + resblock, + resblock_kernel_sizes, + resblock_dilation_sizes, + upsample_rates, + upsample_initial_channel, + upsample_kernel_sizes, + gin_channels, + sr, + is_half=False, + ): + super(GeneratorNSF, self).__init__() + self.num_kernels = len(resblock_kernel_sizes) + self.num_upsamples = len(upsample_rates) + self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates)) + self.m_source = SourceModuleHnNSF(sampling_rate=sr, harmonic_num=0, is_half=is_half) + self.noise_convs = nn.ModuleList() + self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3) + resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 + + self.ups 
= nn.ModuleList() + for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): + c_cur = upsample_initial_channel // (2 ** (i + 1)) + self.ups.append(weight_norm(ConvTranspose1d(upsample_initial_channel // (2**i), upsample_initial_channel // (2 ** (i + 1)), k, u, padding=(k - u) // 2,))) + if i + 1 < len(upsample_rates): + stride_f0 = np.prod(upsample_rates[i + 1 :]) + self.noise_convs.append(Conv1d(1, c_cur, kernel_size=stride_f0 * 2, stride=stride_f0, padding=stride_f0 // 2,)) + else: self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1)) + + self.resblocks = nn.ModuleList() + for i in range(len(self.ups)): + ch = upsample_initial_channel // (2 ** (i + 1)) + for k, d in zip(resblock_kernel_sizes, resblock_dilation_sizes): self.resblocks.append(resblock(ch, k, d)) + + self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) + self.ups.apply(init_weights) + + if gin_channels != 0: self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) + + self.upp = np.prod(upsample_rates) + + def forward(self, x, f0, g=None): + har_source, noi_source, uv = self.m_source(f0, self.upp) + har_source = har_source.transpose(1, 2) + x = self.conv_pre(x) + if g is not None: x = x + self.cond(g) + + for i in range(self.num_upsamples): + x = F.leaky_relu(x, modules.LRELU_SLOPE) + x = self.ups[i](x) + x_source = self.noise_convs[i](har_source) + x = x + x_source + xs = None + for j in range(self.num_kernels): + if xs is None: xs = self.resblocks[i * self.num_kernels + j](x) + else: xs += self.resblocks[i * self.num_kernels + j](x) + x = xs / self.num_kernels + x = F.leaky_relu(x) + x = self.conv_post(x) + x = torch.tanh(x) + return x + + def remove_weight_norm(self): + for l in self.ups: + remove_weight_norm(l) + for l in self.resblocks: + l.remove_weight_norm() + +sr2sr = {"32k": 32000,"40k": 40000,"48k": 48000,} + +class SynthesizerTrnMsNSFsidM(nn.Module): + def __init__( + self, + spec_channels, + segment_size, + inter_channels, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout, + resblock, + resblock_kernel_sizes, + resblock_dilation_sizes, + upsample_rates, + upsample_initial_channel, + upsample_kernel_sizes, + spk_embed_dim, + gin_channels, + sr, + version, + **kwargs + ): + super().__init__() + if type(sr) == type("strr"): sr = sr2sr[sr] + self.spec_channels = spec_channels + self.inter_channels = inter_channels + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.resblock = resblock + self.resblock_kernel_sizes = resblock_kernel_sizes + self.resblock_dilation_sizes = resblock_dilation_sizes + self.upsample_rates = upsample_rates + self.upsample_initial_channel = upsample_initial_channel + self.upsample_kernel_sizes = upsample_kernel_sizes + self.segment_size = segment_size + self.gin_channels = gin_channels + self.spk_embed_dim = spk_embed_dim + if version == "v1": self.enc_p = TextEncoder(inter_channels, hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout) + else: self.enc_p = TextEncoder768(inter_channels, hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout) + self.dec = GeneratorNSF(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels, sr=sr, is_half=kwargs["is_half"]) + self.enc_q = PosteriorEncoder(spec_channels, inter_channels, 
hidden_channels, 5, 1, 16, gin_channels=gin_channels) + self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels) + self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) + self.speaker_map = None + + def remove_weight_norm(self): + self.dec.remove_weight_norm() + self.flow.remove_weight_norm() + self.enc_q.remove_weight_norm() + + def construct_spkmixmap(self, n_speaker): + self.speaker_map = torch.zeros((n_speaker, 1, 1, self.gin_channels)) + for i in range(n_speaker): self.speaker_map[i] = self.emb_g(torch.LongTensor([[i]])) + self.speaker_map = self.speaker_map.unsqueeze(0) + + def forward(self, phone, phone_lengths, pitch, nsff0, g, rnd, max_len=None): + if self.speaker_map is not None: + g = g.reshape((g.shape[0], g.shape[1], 1, 1, 1)) + g = g * self.speaker_map + g = torch.sum(g, dim=1) + g = g.transpose(0, -1).transpose(0, -2).squeeze(0) + else: + g = g.unsqueeze(0) + g = self.emb_g(g).transpose(1, 2) + + m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) + z_p = (m_p + torch.exp(logs_p) * rnd) * x_mask + z = self.flow(z_p, x_mask, g=g, reverse=True) + return self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g) + +class MultiPeriodDiscriminator(torch.nn.Module): + def __init__(self, use_spectral_norm=False): + super(MultiPeriodDiscriminator, self).__init__() + periods = [2, 3, 5, 7, 11, 17] + + discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] + discs += [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods] + self.discriminators = nn.ModuleList(discs) + + def forward(self, y, y_hat): + y_d_rs = [] + y_d_gs = [] + fmap_rs = [] + fmap_gs = [] + for d in self.discriminators: + y_d_r, fmap_r = d(y) + y_d_g, fmap_g = d(y_hat) + y_d_rs.append(y_d_r) + y_d_gs.append(y_d_g) + fmap_rs.append(fmap_r) + fmap_gs.append(fmap_g) + + return y_d_rs, y_d_gs, fmap_rs, fmap_gs + +class MultiPeriodDiscriminatorV2(torch.nn.Module): + def __init__(self, use_spectral_norm=False): + super(MultiPeriodDiscriminatorV2, self).__init__() + periods = [2, 3, 5, 7, 11, 17, 23, 37] + discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] + discs += [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods] + self.discriminators = nn.ModuleList(discs) + + def forward(self, y, y_hat): + y_d_rs = [] + y_d_gs = [] + fmap_rs = [] + fmap_gs = [] + for d in self.discriminators: + y_d_r, fmap_r = d(y) + y_d_g, fmap_g = d(y_hat) + y_d_rs.append(y_d_r) + y_d_gs.append(y_d_g) + fmap_rs.append(fmap_r) + fmap_gs.append(fmap_g) + + return y_d_rs, y_d_gs, fmap_rs, fmap_gs + +class DiscriminatorS(torch.nn.Module): + def __init__(self, use_spectral_norm=False): + super(DiscriminatorS, self).__init__() + norm_f = weight_norm if use_spectral_norm == False else spectral_norm + self.convs = nn.ModuleList( + [ + norm_f(Conv1d(1, 16, 15, 1, padding=7)), + norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), + norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), + norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), + norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), + norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), + ] + ) + self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) + + def forward(self, x): + fmap = [] + + for l in self.convs: + x = l(x) + x = F.leaky_relu(x, modules.LRELU_SLOPE) + fmap.append(x) + x = self.conv_post(x) + fmap.append(x) + x = torch.flatten(x, 1, -1) + + return x, fmap + +class DiscriminatorP(torch.nn.Module): + def __init__(self, period, kernel_size=5, stride=3, 
use_spectral_norm=False): + super(DiscriminatorP, self).__init__() + self.period = period + self.use_spectral_norm = use_spectral_norm + norm_f = weight_norm if use_spectral_norm == False else spectral_norm + self.convs = nn.ModuleList( + [ + norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0),)), + norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0),)), + norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0),)), + norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0),)), + norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0),)), + ] + ) + self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) + + def forward(self, x): + fmap = [] + b, c, t = x.shape + if t % self.period != 0: + n_pad = self.period - (t % self.period) + x = F.pad(x, (0, n_pad), "reflect") + t = t + n_pad + x = x.view(b, c, t // self.period, self.period) + + for l in self.convs: + x = l(x) + x = F.leaky_relu(x, modules.LRELU_SLOPE) + fmap.append(x) + x = self.conv_post(x) + fmap.append(x) + x = torch.flatten(x, 1, -1) + + return x, fmap \ No newline at end of file diff --git a/lib/infer_pack/modules.py b/lib/infer_pack/modules.py new file mode 100644 index 0000000000000000000000000000000000000000..9e87efaec1cef72aac3e7e7a23fda26b0bb75ea7 --- /dev/null +++ b/lib/infer_pack/modules.py @@ -0,0 +1,315 @@ +import copy +import math +import numpy as np +import scipy +import torch +from torch import nn +from torch.nn import functional as F +from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d +from torch.nn.utils import weight_norm, remove_weight_norm +from lib.infer_pack import commons +from lib.infer_pack.commons import init_weights, get_padding +from lib.infer_pack.transforms import piecewise_rational_quadratic_transform + +LRELU_SLOPE = 0.1 + +class LayerNorm(nn.Module): + def __init__(self, channels, eps=1e-5): + super().__init__() + self.channels = channels + self.eps = eps + self.gamma = nn.Parameter(torch.ones(channels)) + self.beta = nn.Parameter(torch.zeros(channels)) + + def forward(self, x): + x = x.transpose(1, -1) + x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps) + return x.transpose(1, -1) + +class ConvReluNorm(nn.Module): + def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout): + super().__init__() + self.in_channels = in_channels + self.hidden_channels = hidden_channels + self.out_channels = out_channels + self.kernel_size = kernel_size + self.n_layers = n_layers + self.p_dropout = p_dropout + assert n_layers > 1, "Number of layers should be larger than 1." 
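+ # the first conv maps in_channels to hidden_channels; the remaining n_layers - 1 convs keep the hidden width 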
+ self.conv_layers = nn.ModuleList() + self.norm_layers = nn.ModuleList() + self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size // 2)) + self.norm_layers.append(LayerNorm(hidden_channels)) + self.relu_drop = nn.Sequential(nn.ReLU(), nn.Dropout(p_dropout)) + for _ in range(n_layers - 1): + self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size // 2)) + self.norm_layers.append(LayerNorm(hidden_channels)) + self.proj = nn.Conv1d(hidden_channels, out_channels, 1) + self.proj.weight.data.zero_() + self.proj.bias.data.zero_() + + def forward(self, x, x_mask): + x_org = x + for i in range(self.n_layers): + x = self.conv_layers[i](x * x_mask) + x = self.norm_layers[i](x) + x = self.relu_drop(x) + x = x_org + self.proj(x) + return x * x_mask + +class DDSConv(nn.Module): + def __init__(self, channels, kernel_size, n_layers, p_dropout=0.0): + super().__init__() + self.channels = channels + self.kernel_size = kernel_size + self.n_layers = n_layers + self.p_dropout = p_dropout + self.drop = nn.Dropout(p_dropout) + self.convs_sep = nn.ModuleList() + self.convs_1x1 = nn.ModuleList() + self.norms_1 = nn.ModuleList() + self.norms_2 = nn.ModuleList() + for i in range(n_layers): + dilation = kernel_size**i + padding = (kernel_size * dilation - dilation) // 2  # halved so the dilated depthwise conv preserves length for the residual add + self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size, groups=channels, dilation=dilation, padding=padding)) + self.convs_1x1.append(nn.Conv1d(channels, channels, 1)) + self.norms_1.append(LayerNorm(channels)) + self.norms_2.append(LayerNorm(channels)) + + def forward(self, x, x_mask, g=None): + if g is not None: x = x + g + for i in range(self.n_layers): + y = self.convs_sep[i](x * x_mask) + y = self.norms_1[i](y) + y = F.gelu(y) + y = self.convs_1x1[i](y) + y = self.norms_2[i](y) + y = F.gelu(y) + y = self.drop(y) + x = x + y + return x * x_mask + +class WN(torch.nn.Module): + def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0): + super(WN, self).__init__() + assert kernel_size % 2 == 1 + self.hidden_channels = hidden_channels + self.kernel_size = kernel_size + self.dilation_rate = dilation_rate + self.n_layers = n_layers + self.gin_channels = gin_channels + self.p_dropout = p_dropout + + self.in_layers = torch.nn.ModuleList() + self.res_skip_layers = torch.nn.ModuleList() + self.drop = nn.Dropout(p_dropout) + + if gin_channels != 0: + cond_layer = torch.nn.Conv1d(gin_channels, 2 * hidden_channels * n_layers, 1) + self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name="weight") + + for i in range(n_layers): + dilation = dilation_rate**i + padding = int((kernel_size * dilation - dilation) / 2) + in_layer = torch.nn.Conv1d(hidden_channels, 2 * hidden_channels, kernel_size, dilation=dilation, padding=padding) + in_layer = torch.nn.utils.weight_norm(in_layer, name="weight") + self.in_layers.append(in_layer) + + if i < n_layers - 1: res_skip_channels = 2 * hidden_channels + else: res_skip_channels = hidden_channels + + res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1) + res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name="weight") + self.res_skip_layers.append(res_skip_layer) + + def forward(self, x, x_mask, g=None, **kwargs): + output = torch.zeros_like(x) + n_channels_tensor = torch.IntTensor([self.hidden_channels]) + + if g is not None: g = self.cond_layer(g) + + for i in range(self.n_layers): + x_in = self.in_layers[i](x) + if g is not None: + cond_offset = i * 2 
* self.hidden_channels + g_l = g[:, cond_offset : cond_offset + 2 * self.hidden_channels, :] + else: g_l = torch.zeros_like(x_in) + + acts = commons.fused_add_tanh_sigmoid_multiply(x_in, g_l, n_channels_tensor) + acts = self.drop(acts) + + res_skip_acts = self.res_skip_layers[i](acts) + if i < self.n_layers - 1: + res_acts = res_skip_acts[:, : self.hidden_channels, :] + x = (x + res_acts) * x_mask + output = output + res_skip_acts[:, self.hidden_channels :, :] + else: output = output + res_skip_acts + return output * x_mask + + def remove_weight_norm(self): + if self.gin_channels != 0: torch.nn.utils.remove_weight_norm(self.cond_layer) + for l in self.in_layers: torch.nn.utils.remove_weight_norm(l) + for l in self.res_skip_layers: torch.nn.utils.remove_weight_norm(l) + +class ResBlock1(torch.nn.Module): + def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)): + super(ResBlock1, self).__init__() + self.convs1 = nn.ModuleList( + [ + weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], padding=get_padding(kernel_size, dilation[0]))), + weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], padding=get_padding(kernel_size, dilation[1]))), + weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2], padding=get_padding(kernel_size, dilation[2])))]) + self.convs1.apply(init_weights) + + self.convs2 = nn.ModuleList( + [ + weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, padding=get_padding(kernel_size, 1))), + weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, padding=get_padding(kernel_size, 1))), + weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, padding=get_padding(kernel_size, 1)))]) + self.convs2.apply(init_weights) + + def forward(self, x, x_mask=None): + for c1, c2 in zip(self.convs1, self.convs2): + xt = F.leaky_relu(x, LRELU_SLOPE) + if x_mask is not None: xt = xt * x_mask + xt = c1(xt) + xt = F.leaky_relu(xt, LRELU_SLOPE) + if x_mask is not None: xt = xt * x_mask + xt = c2(xt) + x = xt + x + if x_mask is not None: + x = x * x_mask + return x + + def remove_weight_norm(self): + for l in self.convs1: remove_weight_norm(l) + for l in self.convs2: remove_weight_norm(l) + +class ResBlock2(torch.nn.Module): + def __init__(self, channels, kernel_size=3, dilation=(1, 3)): + super(ResBlock2, self).__init__() + self.convs = nn.ModuleList([weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], padding=get_padding(kernel_size, dilation[0]))),weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], padding=get_padding(kernel_size, dilation[1])))]) + self.convs.apply(init_weights) + + def forward(self, x, x_mask=None): + for c in self.convs: + xt = F.leaky_relu(x, LRELU_SLOPE) + if x_mask is not None: xt = xt * x_mask + xt = c(xt) + x = xt + x + if x_mask is not None: x = x * x_mask + return x + + def remove_weight_norm(self): + for l in self.convs: remove_weight_norm(l) + +class Log(nn.Module): + def forward(self, x, x_mask, reverse=False, **kwargs): + if not reverse: + y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask + logdet = torch.sum(-y, [1, 2]) + return y, logdet + else: + x = torch.exp(x) * x_mask + return x + +class Flip(nn.Module): + def forward(self, x, *args, reverse=False, **kwargs): + x = torch.flip(x, [1]) + if reverse: return x + logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device) + return x, logdet + +class ElementwiseAffine(nn.Module): + def __init__(self, channels): + super().__init__() + self.channels 
= channels + self.m = nn.Parameter(torch.zeros(channels, 1)) + self.logs = nn.Parameter(torch.zeros(channels, 1)) + + def forward(self, x, x_mask, reverse=False, **kwargs): + if not reverse: + y = self.m + torch.exp(self.logs) * x + y = y * x_mask + logdet = torch.sum(self.logs * x_mask, [1, 2]) + return y, logdet + else: + x = (x - self.m) * torch.exp(-self.logs) * x_mask + return x + +class ResidualCouplingLayer(nn.Module): + def __init__(self, channels, hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=0, gin_channels=0, mean_only=False): + assert channels % 2 == 0, "channels should be divisible by 2" + super().__init__() + self.channels = channels + self.hidden_channels = hidden_channels + self.kernel_size = kernel_size + self.dilation_rate = dilation_rate + self.n_layers = n_layers + self.half_channels = channels // 2 + self.mean_only = mean_only + + self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1) + self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, gin_channels=gin_channels) + self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1) + self.post.weight.data.zero_() + self.post.bias.data.zero_() + + def forward(self, x, x_mask, g=None, reverse=False): + x0, x1 = torch.split(x, [self.half_channels] * 2, 1) + h = self.pre(x0) * x_mask + h = self.enc(h, x_mask, g=g) + stats = self.post(h) * x_mask + if not self.mean_only: m, logs = torch.split(stats, [self.half_channels] * 2, 1) + else: + m = stats + logs = torch.zeros_like(m) + + if not reverse: + x1 = m + x1 * torch.exp(logs) * x_mask + x = torch.cat([x0, x1], 1) + logdet = torch.sum(logs, [1, 2]) + return x, logdet + else: + x1 = (x1 - m) * torch.exp(-logs) * x_mask + x = torch.cat([x0, x1], 1) + return x + + def remove_weight_norm(self): self.enc.remove_weight_norm() + +class ConvFlow(nn.Module): + def __init__(self, in_channels, filter_channels, kernel_size, n_layers, num_bins=10, tail_bound=5.0): + super().__init__() + self.in_channels = in_channels + self.filter_channels = filter_channels + self.kernel_size = kernel_size + self.n_layers = n_layers + self.num_bins = num_bins + self.tail_bound = tail_bound + self.half_channels = in_channels // 2 + + self.pre = nn.Conv1d(self.half_channels, filter_channels, 1) + self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.0) + self.proj = nn.Conv1d(filter_channels, self.half_channels * (num_bins * 3 - 1), 1) + self.proj.weight.data.zero_() + self.proj.bias.data.zero_() + + def forward(self, x, x_mask, g=None, reverse=False): + x0, x1 = torch.split(x, [self.half_channels] * 2, 1) + h = self.pre(x0) + h = self.convs(h, x_mask, g=g) + h = self.proj(h) * x_mask + + b, c, t = x0.shape + h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) + + unnormalized_widths = h[..., : self.num_bins] / math.sqrt(self.filter_channels) + unnormalized_heights = h[..., self.num_bins : 2 * self.num_bins] / math.sqrt(self.filter_channels) + unnormalized_derivatives = h[..., 2 * self.num_bins :] + + x1, logabsdet = piecewise_rational_quadratic_transform(x1, unnormalized_widths, unnormalized_heights, unnormalized_derivatives, inverse=reverse, tails="linear", tail_bound=self.tail_bound) + + x = torch.cat([x0, x1], 1) * x_mask + logdet = torch.sum(logabsdet * x_mask, [1, 2]) + return (x, logdet) if not reverse else x \ No newline at end of file diff --git a/lib/infer_pack/onnx_inference.py b/lib/infer_pack/onnx_inference.py new file mode 100644 index 
0000000000000000000000000000000000000000..a94b71dbe32fe6b339c9098363257b6f8ef9e62a --- /dev/null +++ b/lib/infer_pack/onnx_inference.py @@ -0,0 +1,67 @@ +import onnxruntime +import librosa +import numpy as np +from lib.infer_pack.modules.F0Predictor.PMF0Predictor import PMF0Predictor + +VEC_PATH = "pretrained/vec-768-layer-12.onnx" +MAX_LENGTH = 50.0 +F0_MIN = 50 +F0_MAX = 1100 +F0_MEL_MIN = 1127 * np.log(1 + F0_MIN / 700) +F0_MEL_MAX = 1127 * np.log(1 + F0_MAX / 700) +RESAMPLING_RATE = 16000 + +class ContentVectorModel: + def __init__(self, vector_path=VEC_PATH): + providers = ["CPUExecutionProvider"] + self.model = onnxruntime.InferenceSession(vector_path, providers=providers) + + def __call__(self, audio_wave): return self.process_audio(audio_wave) + + def process_audio(self, audio_wave): + features = audio_wave.mean(-1) if audio_wave.ndim == 2 else audio_wave + features = np.expand_dims(np.expand_dims(features, 0), 0) + onnx_input = {self.model.get_inputs()[0].name: features} + logits = self.model.run(None, onnx_input)[0] + return logits.transpose(0, 2, 1) + + +class OnnxRVC: + def __init__(self, model_path, sampling_rate=40000, hop_size=512, vector_path=VEC_PATH): + self.vec_model = ContentVectorModel(vector_path)  # VEC_PATH already carries the .onnx extension + providers = ["CPUExecutionProvider"] + self.model = onnxruntime.InferenceSession(model_path, providers=providers) + self.sampling_rate = sampling_rate + self.hop_size = hop_size + + def forward(self, hubert, hubert_length, pitch, pitchf, ds, rnd): + onnx_input = {self.model.get_inputs()[0].name: hubert, self.model.get_inputs()[1].name: hubert_length, self.model.get_inputs()[2].name: pitch, self.model.get_inputs()[3].name: pitchf, self.model.get_inputs()[4].name: ds, self.model.get_inputs()[5].name: rnd} + return (self.model.run(None, onnx_input)[0] * 32767).astype(np.int16) + + def inference(self, raw_path, sid, f0_method="pm", f0_up_key=0, pad_time=0.5, cr_threshold=0.02): + f0_predictor = PMF0Predictor(hop_length=self.hop_size, sampling_rate=self.sampling_rate, threshold=cr_threshold) + wav, sr = librosa.load(raw_path, sr=self.sampling_rate) + org_length = len(wav) + if org_length / sr > MAX_LENGTH: raise RuntimeError(f"Input audio exceeds the maximum length of {MAX_LENGTH:.0f} seconds") + + wav16k = librosa.resample(wav, orig_sr=sr, target_sr=RESAMPLING_RATE) + hubert = self.vec_model(wav16k) + hubert = np.repeat(hubert, 2, axis=2).transpose(0, 2, 1).astype(np.float32) + hubert_length = hubert.shape[1] + + pitchf = f0_predictor.compute_f0(wav, hubert_length) + pitchf *= 2 ** (f0_up_key / 12) + pitch = pitchf.copy() + f0_mel = 1127 * np.log(1 + pitch / 700) + f0_mel = np.clip(f0_mel - F0_MEL_MIN, 0, None) * 254 / (F0_MEL_MAX - F0_MEL_MIN) + 1 + f0_mel = np.clip(f0_mel, 1, 255)  # keep the coarse pitch inside the 256-entry embedding table after transposition + pitch = np.rint(f0_mel).astype(np.int64) + + pitchf = pitchf.reshape(1, -1).astype(np.float32) + pitch = pitch.reshape(1, -1) + ds = np.array([sid]).astype(np.int64) + rnd = np.random.randn(1, 192, hubert_length).astype(np.float32) + hubert_length = np.array([hubert_length]).astype(np.int64) + + out_wav = self.forward(hubert, hubert_length, pitch, pitchf, ds, rnd).squeeze() + out_wav = np.pad(out_wav, (0, 2 * self.hop_size), "constant") + return out_wav[:org_length] \ No newline at end of file diff --git a/lib/infer_pack/transforms.py b/lib/infer_pack/transforms.py new file mode 100644 index 0000000000000000000000000000000000000000..4de15fa1d9e8297bf2ee91d7cbf457617cc1ecef --- /dev/null +++ b/lib/infer_pack/transforms.py @@ -0,0 +1,115 @@ +import torch +from torch.nn import functional as F +import numpy as np + +DEFAULT_MIN_BIN_WIDTH = 1e-3 +DEFAULT_MIN_BIN_HEIGHT = 
1e-3 +DEFAULT_MIN_DERIVATIVE = 1e-3 + +def piecewise_rational_quadratic_transform(inputs, unnormalized_widths, unnormalized_heights, unnormalized_derivatives, inverse=False, tails=None, tail_bound=1.0, min_bin_width=DEFAULT_MIN_BIN_WIDTH, min_bin_height=DEFAULT_MIN_BIN_HEIGHT, min_derivative=DEFAULT_MIN_DERIVATIVE): + if tails is None: + spline_fn = rational_quadratic_spline + spline_kwargs = {} + else: + spline_fn = unconstrained_rational_quadratic_spline + spline_kwargs = {"tails": tails, "tail_bound": tail_bound} + + return spline_fn(inputs=inputs, unnormalized_widths=unnormalized_widths, unnormalized_heights=unnormalized_heights, unnormalized_derivatives=unnormalized_derivatives, inverse=inverse, min_bin_width=min_bin_width, min_bin_height=min_bin_height, min_derivative=min_derivative, **spline_kwargs) + +def searchsorted(bin_locations, inputs, eps=1e-6): + bin_locations[..., -1] += eps + return torch.sum(inputs[..., None] >= bin_locations, dim=-1) - 1 + +def unconstrained_rational_quadratic_spline(inputs, unnormalized_widths, unnormalized_heights, unnormalized_derivatives, inverse=False, tails="linear", tail_bound=1.0, min_bin_width=DEFAULT_MIN_BIN_WIDTH, min_bin_height=DEFAULT_MIN_BIN_HEIGHT, min_derivative=DEFAULT_MIN_DERIVATIVE): + if tails != "linear": raise RuntimeError(f"{tails} tails are not implemented.") + + unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1)) + constant = np.log(np.exp(1 - min_derivative) - 1) + unnormalized_derivatives[..., 0] = constant + unnormalized_derivatives[..., -1] = constant + + inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound) + outside_interval_mask = ~inside_interval_mask + outputs = torch.where(outside_interval_mask, inputs, torch.zeros_like(inputs)) + logabsdet = torch.zeros_like(inputs) + + inside_outputs, inside_logabsdet = rational_quadratic_spline( + inputs=inputs[inside_interval_mask], + unnormalized_widths=unnormalized_widths[inside_interval_mask, :], + unnormalized_heights=unnormalized_heights[inside_interval_mask, :], + unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :], + inverse=inverse, left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound, + min_bin_width=min_bin_width, min_bin_height=min_bin_height, min_derivative=min_derivative) + + outputs[inside_interval_mask] = inside_outputs + logabsdet[inside_interval_mask] = inside_logabsdet + + return outputs, logabsdet + +def rational_quadratic_spline(inputs, unnormalized_widths, unnormalized_heights, unnormalized_derivatives, inverse=False, left=0.0, right=1.0, bottom=0.0, top=1.0, min_bin_width=DEFAULT_MIN_BIN_WIDTH, min_bin_height=DEFAULT_MIN_BIN_HEIGHT, min_derivative=DEFAULT_MIN_DERIVATIVE): + num_bins = unnormalized_widths.shape[-1] + + if min_bin_width * num_bins > 1.0: raise ValueError("Minimal bin width too large for the number of bins") + if min_bin_height * num_bins > 1.0: raise ValueError("Minimal bin height too large for the number of bins") + + widths, heights = compute_widths_and_heights(unnormalized_widths, unnormalized_heights, min_bin_width, min_bin_height, num_bins) + # build num_bins + 1 knot positions per axis: zero-pad the cumulative sums at the front, then rescale to [left, right] / [bottom, top] + cumwidths = F.pad(widths.cumsum(dim=-1), pad=(1, 0), mode="constant", value=0.0) + cumwidths = (right - left) * cumwidths + left + cumwidths[..., 0] = left + cumwidths[..., -1] = right + cumheights = F.pad(heights.cumsum(dim=-1), pad=(1, 0), mode="constant", value=0.0) + cumheights = (top - bottom) * cumheights + bottom + cumheights[..., 0] = bottom + cumheights[..., -1] = top + widths, heights = cumwidths[..., 1:] - cumwidths[..., :-1], cumheights[..., 1:] - cumheights[..., :-1] + + derivatives = min_derivative + F.softplus(unnormalized_derivatives) + + if inverse: bin_idx = searchsorted(cumheights, inputs)[..., None] + else: bin_idx = searchsorted(cumwidths, inputs)[..., None] + + gather_args = (-1, bin_idx) + input_cumwidths, input_bin_widths, input_cumheights, input_delta, input_derivatives, input_derivatives_plus_one, input_heights = map( + lambda tensor: tensor.gather(*gather_args)[..., 0], + (cumwidths, widths, cumheights, heights / widths, derivatives, derivatives[..., 1:], heights)) + + if inverse: outputs, logabsdet = inverse_rational_quadratic_spline(inputs, input_cumheights, input_heights, input_derivatives, input_derivatives_plus_one, input_delta, input_bin_widths, input_cumwidths) + else: outputs, logabsdet = direct_rational_quadratic_spline(inputs, input_cumwidths, input_bin_widths, input_cumheights, input_heights, input_derivatives, input_derivatives_plus_one, input_delta) + + return outputs, logabsdet + +def compute_widths_and_heights(unnormalized_widths, unnormalized_heights, min_bin_width, min_bin_height, num_bins): + # normalized bin fractions (each summing to 1, floored at the minimum bin size); the caller turns them into knot coordinates + widths = F.softmax(unnormalized_widths, dim=-1) + widths = min_bin_width + (1 - min_bin_width * num_bins) * widths + + heights = F.softmax(unnormalized_heights, dim=-1) + heights = min_bin_height + (1 - min_bin_height * num_bins) * heights + + return widths, heights + +def inverse_rational_quadratic_spline(inputs, input_cumheights, input_heights, input_derivatives, input_derivatives_plus_one, input_delta, input_bin_widths, input_cumwidths): + a = (inputs - input_cumheights) * (input_derivatives + input_derivatives_plus_one - 2 * input_delta) + input_heights * (input_delta - input_derivatives) + b = input_heights * input_derivatives - (inputs - input_cumheights) * (input_derivatives + input_derivatives_plus_one - 2 * input_delta) + c = -input_delta * (inputs - input_cumheights) + + discriminant = b.pow(2) - 4 * a * c + assert (discriminant >= 0).all() + + root = (2 * c) / (-b - torch.sqrt(discriminant)) + outputs = root * input_bin_widths + input_cumwidths + theta_one_minus_theta = root * (1 - root) + denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) * theta_one_minus_theta) + derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2) + 2 * input_delta * theta_one_minus_theta + input_derivatives * (1 - root).pow(2)) + logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) + + return outputs, -logabsdet + +def direct_rational_quadratic_spline(inputs, input_cumwidths, input_bin_widths, input_cumheights, input_heights, input_derivatives, input_derivatives_plus_one, input_delta): + theta = (inputs - input_cumwidths) / input_bin_widths + theta_one_minus_theta = theta * (1 - theta) + numerator = input_heights * (input_delta * theta.pow(2) + input_derivatives * theta_one_minus_theta) + denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) * theta_one_minus_theta) + outputs = input_cumheights + numerator / denominator + derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * theta.pow(2) + 2 * input_delta * theta_one_minus_theta + input_derivatives * (1 - theta).pow(2)) + logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) + + return outputs, logabsdet \ No newline at end of file diff --git a/models/FernandoAlonso/FernandoAlonso/added_IVF582_Flat_nprobe_1_fernando2_v2.index b/models/FernandoAlonso/FernandoAlonso/added_IVF582_Flat_nprobe_1_fernando2_v2.index 
new file mode 100644 index 0000000000000000000000000000000000000000..7a96c205c5475b987f31ebe0d71a1b7919942d87 --- /dev/null +++ b/models/FernandoAlonso/FernandoAlonso/added_IVF582_Flat_nprobe_1_fernando2_v2.index @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9c5d70bed6e4bed0b54eaef9c1f2dd180fda94035287baafadb99a456b8cbd52 +size 71801099 diff --git a/models/FernandoAlonso/FernandoAlonso/fernando2.pth b/models/FernandoAlonso/FernandoAlonso/fernando2.pth new file mode 100644 index 0000000000000000000000000000000000000000..3ed6f842549a0a69d8d3bad38c3461d4b741de0e --- /dev/null +++ b/models/FernandoAlonso/FernandoAlonso/fernando2.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3926c2925ebe47d0c7a8d5b947813f0866a08d2970a10cb69d64f1b6e2371bbc +size 55224197 diff --git a/models/HolasoyGerman/HolasoyGerman/HolasoyGerman.pth b/models/HolasoyGerman/HolasoyGerman/HolasoyGerman.pth new file mode 100644 index 0000000000000000000000000000000000000000..5c578cd14f44b8f610d4a150f8eb1d46f2b4859c --- /dev/null +++ b/models/HolasoyGerman/HolasoyGerman/HolasoyGerman.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:db9c812c5ecd6e7afa5b0bee0300741ba87731097c6449b64924d46c10644247 +size 55226033 diff --git a/models/HolasoyGerman/HolasoyGerman/added_IVF3117_Flat_nprobe_1_HolasoyGerman_v2.index b/models/HolasoyGerman/HolasoyGerman/added_IVF3117_Flat_nprobe_1_HolasoyGerman_v2.index new file mode 100644 index 0000000000000000000000000000000000000000..95a7d812a4bf7678e6cd83cf5e1312a08d7edc8b --- /dev/null +++ b/models/HolasoyGerman/HolasoyGerman/added_IVF3117_Flat_nprobe_1_HolasoyGerman_v2.index @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a11850d919513a315e2a4ec41586a74c650f0ccaf3fc16d8aebff4e63ce07dc4 +size 384023779 diff --git a/models/HolasoyGerman/model_info.json b/models/HolasoyGerman/model_info.json new file mode 100644 index 0000000000000000000000000000000000000000..77858dc2ddd3e8d80c9cf475ae27c44a53457149 --- /dev/null +++ b/models/HolasoyGerman/model_info.json @@ -0,0 +1,9 @@ +{ + "HolasoyGerman": { + "title": "HolasoyGerman", + "model_path": "HolasoyGerman.pth", + "feature_retrieval_library": "added_IVF3117_Flat_nprobe_1_HolasoyGerman_v2.index", + "model_path_checksum": "74633371eb2c2a75292633ee8922fddc", + "index_path_checksum": "febffc21f866063b40900cd068d379b9" + } +} \ No newline at end of file diff --git a/models/Homer/Homer/HomerEsp.pth b/models/Homer/Homer/HomerEsp.pth new file mode 100644 index 0000000000000000000000000000000000000000..32a6486a5df9c5eef70d9ca1944f692e922b910f --- /dev/null +++ b/models/Homer/Homer/HomerEsp.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:309244d07dd5f611f4bca2415a933a6bf499d1a803cd8df45e005d8256373b65 +size 55027130 diff --git a/models/Homer/Homer/added_IVF360_Flat_nprobe_1_HomerEsp_v1.index b/models/Homer/Homer/added_IVF360_Flat_nprobe_1_HomerEsp_v1.index new file mode 100644 index 0000000000000000000000000000000000000000..37c6e9d8fb91934df637ec94b5dc30d943dbc7e9 --- /dev/null +++ b/models/Homer/Homer/added_IVF360_Flat_nprobe_1_HomerEsp_v1.index @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a3b3375a8e01675a1a46439af33ff4941f52eb9592b8784c154dcd55cf9eba03 +size 14861971 diff --git a/models/Homer/model_info.json b/models/Homer/model_info.json new file mode 100644 index 0000000000000000000000000000000000000000..3223a251e58e445390fe55af30744d4bb4acada8 --- /dev/null +++ b/models/Homer/model_info.json @@ -0,0 
+1,9 @@ +{ + "Homer": { + "title": "Homer", + "model_path": "HomerEsp.pth", + "feature_retrieval_library": "added_IVF360_Flat_nprobe_1_HomerEsp_v1.index", + "model_path_checksum": "7df9a9cdf205fe13f5ea6d73d7e9fd41", + "index_path_checksum": "5d554ed74ef44a057d4f997dabe8c3ea" + } +} \ No newline at end of file diff --git a/models/Ibai/Ibai/Ibai.pth b/models/Ibai/Ibai/Ibai.pth new file mode 100644 index 0000000000000000000000000000000000000000..fbee77e780c25b3ccb7b3245cf7744d909879afa --- /dev/null +++ b/models/Ibai/Ibai/Ibai.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aa102e749f66ede51b3bc43a4cceaeeaac7abd26c9f20348a233d042349d0467 +size 55227410 diff --git a/models/Ibai/Ibai/added_IVF4601_Flat_nprobe_1_Ibai_v2.index b/models/Ibai/Ibai/added_IVF4601_Flat_nprobe_1_Ibai_v2.index new file mode 100644 index 0000000000000000000000000000000000000000..c8385992ace5610ef984d1f4dc5c1d864aa98648 --- /dev/null +++ b/models/Ibai/Ibai/added_IVF4601_Flat_nprobe_1_Ibai_v2.index @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:177232ae8f7428fbc925719e11fcfd73a48b0919c3f94fd64eae6ff9f815c440 +size 566889539 diff --git a/models/Ibai/model_info.json b/models/Ibai/model_info.json new file mode 100644 index 0000000000000000000000000000000000000000..bc35ddb4bb5bee2a11cb49aa471b1b09ef75feb8 --- /dev/null +++ b/models/Ibai/model_info.json @@ -0,0 +1,9 @@ +{ + "Ibai": { + "title": "Ibai", + "model_path": "Ibai.pth", + "feature_retrieval_library": "added_IVF4601_Flat_nprobe_1_Ibai_v2.index", + "model_path_checksum": "098e96b7158edd654d0e269959ac9a20", + "index_path_checksum": "b16da2fa68a6341f8fc409a83db68dcf" + } +} \ No newline at end of file diff --git a/models/IlloJuan/IlloJuan/IlloJuan.pth b/models/IlloJuan/IlloJuan/IlloJuan.pth new file mode 100644 index 0000000000000000000000000000000000000000..25d419497c44f61ec13510d8fc987598fde07fc3 --- /dev/null +++ b/models/IlloJuan/IlloJuan/IlloJuan.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:80c3229a3b5ee888ffb9f8b85385de9705966b156ea4471619f25186318f55b4 +size 55223738 diff --git a/models/IlloJuan/IlloJuan/added_IVF593_Flat_nprobe_1_IlloJuan_v2.index b/models/IlloJuan/IlloJuan/added_IVF593_Flat_nprobe_1_IlloJuan_v2.index new file mode 100644 index 0000000000000000000000000000000000000000..7bd424e273276a2b29034b9f91e61f875d4fafe1 --- /dev/null +++ b/models/IlloJuan/IlloJuan/added_IVF593_Flat_nprobe_1_IlloJuan_v2.index @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e3bce6d0a1f554f5b5e38bb8f5d72e15a05c2383261f72e4fb5c06c2d714763f +size 73162459 diff --git a/models/IlloJuan/model_info.json b/models/IlloJuan/model_info.json new file mode 100644 index 0000000000000000000000000000000000000000..2a5327087761dff0a1de98bc6f933e1b902d3ff3 --- /dev/null +++ b/models/IlloJuan/model_info.json @@ -0,0 +1,9 @@ +{ + "IlloJuan": { + "title": "IlloJuan", + "model_path": "IlloJuan.pth", + "feature_retrieval_library": "added_IVF593_Flat_nprobe_1_IlloJuan_v2.index", + "model_path_checksum": "06a20ffd281284950537ebdbd227b8a9", + "index_path_checksum": "fcae1daa14cc24d862ca09216a02595c" + } +} \ No newline at end of file diff --git a/models/Quevedo/Quevedo/added_IVF2301_Flat_nprobe_10.index b/models/Quevedo/Quevedo/added_IVF2301_Flat_nprobe_10.index new file mode 100644 index 0000000000000000000000000000000000000000..489b1685d1b6fb487c0dfad5ffa5ac7b02dbf752 --- /dev/null +++ b/models/Quevedo/Quevedo/added_IVF2301_Flat_nprobe_10.index @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:0fdfabf3c2a559c0c5649f9991e667157bb14440a49860dce02e06ec26a34846 +size 95005027 diff --git a/models/Quevedo/Quevedo/quevedo.pth b/models/Quevedo/Quevedo/quevedo.pth new file mode 100644 index 0000000000000000000000000000000000000000..5484624ec4a64440e1f4e0875fd8b7c267ad88d3 --- /dev/null +++ b/models/Quevedo/Quevedo/quevedo.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:071f9b5e30ae0c219b47de25a9300b4097f5a7d8025946088ce5d505d9ae6227 +size 55026095 diff --git a/models/Quevedo/model_info.json b/models/Quevedo/model_info.json new file mode 100644 index 0000000000000000000000000000000000000000..7ce685bad91702640a9e99cdfd3eed42c13e4d69 --- /dev/null +++ b/models/Quevedo/model_info.json @@ -0,0 +1,9 @@ +{ + "Quevedo": { + "title": "Quevedo", + "model_path": "quevedo.pth", + "feature_retrieval_library": "added_IVF2301_Flat_nprobe_10.index", + "model_path_checksum": "6325e08a29a9cbfd0625dfe34be24a0c", + "index_path_checksum": "924db9553a0a70538a53e625fbadde40" + } +} \ No newline at end of file diff --git a/models/Shadoune666/Shadoune666/Shadoune666.pth b/models/Shadoune666/Shadoune666/Shadoune666.pth new file mode 100644 index 0000000000000000000000000000000000000000..d10297f15e2db1b4454c9bdd661fe8827e2cbb7e --- /dev/null +++ b/models/Shadoune666/Shadoune666/Shadoune666.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1447dd914d38c33b377b913c5ebc5f9bddf5a71195e2bb9b400af9bde6735e91 +size 55225115 diff --git a/models/Shadoune666/Shadoune666/added_IVF716_Flat_nprobe_1_Shadoune666_v2.index b/models/Shadoune666/Shadoune666/added_IVF716_Flat_nprobe_1_Shadoune666_v2.index new file mode 100644 index 0000000000000000000000000000000000000000..75efbdb85bfc2fdf7205ec0afbf77e487acfdafc --- /dev/null +++ b/models/Shadoune666/Shadoune666/added_IVF716_Flat_nprobe_1_Shadoune666_v2.index @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bf519d09b556d5052f68a0be8628d10071e294de284291810b9ed88d2f8e5981 +size 88328379 diff --git a/models/Shadoune666/model_info.json b/models/Shadoune666/model_info.json new file mode 100644 index 0000000000000000000000000000000000000000..802bfdf0183648d070621a8fd8bb55e49d2776bb --- /dev/null +++ b/models/Shadoune666/model_info.json @@ -0,0 +1,9 @@ +{ + "Shadoune666": { + "title": "Shadoune666", + "model_path": "Shadoune666.pth", + "feature_retrieval_library": "added_IVF716_Flat_nprobe_1_Shadoune666_v2.index", + "model_path_checksum": "7bca067c9055104ebea2bf3f6c7bfde4", + "index_path_checksum": "b23d61f502025b4f9d967e4de1935d0b" + } +} \ No newline at end of file diff --git a/models/Spreen/Spreen/Spreen.pth b/models/Spreen/Spreen/Spreen.pth new file mode 100644 index 0000000000000000000000000000000000000000..b45005cc214452fc8e114abb313157eb6a3f8e46 --- /dev/null +++ b/models/Spreen/Spreen/Spreen.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4be95ea3bb27866c12fafd46ee9997165d025f50e35260b23e2c175c942f6cff +size 55216420 diff --git a/models/Spreen/Spreen/added_IVF4737_Flat_nprobe_1_Spreen_v2.index b/models/Spreen/Spreen/added_IVF4737_Flat_nprobe_1_Spreen_v2.index new file mode 100644 index 0000000000000000000000000000000000000000..3d23bde2676752c69bf885aa63e3063248ce28ff --- /dev/null +++ b/models/Spreen/Spreen/added_IVF4737_Flat_nprobe_1_Spreen_v2.index @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:669ca397e95bf51ff0e3f1aabe1ca60c83a5a6c85653ba0f1e550089c63b4305 +size 583660139 diff --git 
a/models/Spreen/model_info.json b/models/Spreen/model_info.json new file mode 100644 index 0000000000000000000000000000000000000000..75d081ba28fc3d14125b5859d11ee85451386815 --- /dev/null +++ b/models/Spreen/model_info.json @@ -0,0 +1,9 @@ +{ + "Spreen": { + "title": "Spreen", + "model_path": "Spreen.pth", + "feature_retrieval_library": "added_IVF4737_Flat_nprobe_1_Spreen_v2.index", + "model_path_checksum": "dbdb20d98928308c2d048444efb34b77", + "index_path_checksum": "213bbb9c387426fadc97c41e97f34b0a" + } +} \ No newline at end of file diff --git a/models/Totakeke/Totakeke/added_IVF256_Flat_nprobe_1_full_totakeke_v2.index b/models/Totakeke/Totakeke/added_IVF256_Flat_nprobe_1_full_totakeke_v2.index new file mode 100644 index 0000000000000000000000000000000000000000..5e57f260360da5260b043305e2bfafab4e93cd28 --- /dev/null +++ b/models/Totakeke/Totakeke/added_IVF256_Flat_nprobe_1_full_totakeke_v2.index @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b88bd6d822fa8391fa2ca6dd6e0c6eac8ac031385519da3fd87d2a6e8f272151 +size 31588619 diff --git a/models/Totakeke/Totakeke/full_totakeke_e150_s44400.pth b/models/Totakeke/Totakeke/full_totakeke_e150_s44400.pth new file mode 100644 index 0000000000000000000000000000000000000000..265450b2e2419ff10fdefd7a8f073b5c399bf36c --- /dev/null +++ b/models/Totakeke/Totakeke/full_totakeke_e150_s44400.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a3fb774cbbb74ae6005a87e370fc01337d3a24f7c5bb8c0962b335e4bd69488b +size 57590901 diff --git a/models/Totakeke/model_info.json b/models/Totakeke/model_info.json new file mode 100644 index 0000000000000000000000000000000000000000..018fd930592d9d14256a03ad45a4426966540f6d --- /dev/null +++ b/models/Totakeke/model_info.json @@ -0,0 +1,9 @@ +{ + "Totakeke": { + "title": "Totakeke", + "model_path": "full_totakeke_e150_s44400.pth", + "feature_retrieval_library": "added_IVF256_Flat_nprobe_1_full_totakeke_v2.index", + "model_path_checksum": "ccb3a07fd141c6e918eca8a969d95c5d", + "index_path_checksum": "d4b2d2662efdc0bcb8f3610a686525c9" + } +} \ No newline at end of file diff --git a/models/Vegetta777/Vegetta777/Vegetta777.pth b/models/Vegetta777/Vegetta777/Vegetta777.pth new file mode 100644 index 0000000000000000000000000000000000000000..acce06d49d6e5b680b0040afcd6a960b0be33ef3 --- /dev/null +++ b/models/Vegetta777/Vegetta777/Vegetta777.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f13483615d05c1b31a4eb0fbee35ac5dca9f9a48a36a0165b947c08303ce75e5 +size 55218256 diff --git a/models/Vegetta777/Vegetta777/added_IVF2021_Flat_nprobe_1_Vegetta777_v2.index b/models/Vegetta777/Vegetta777/added_IVF2021_Flat_nprobe_1_Vegetta777_v2.index new file mode 100644 index 0000000000000000000000000000000000000000..5f91aa9d368cf03c85d8659f0b042b42000c0363 --- /dev/null +++ b/models/Vegetta777/Vegetta777/added_IVF2021_Flat_nprobe_1_Vegetta777_v2.index @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dd713395db0957eb626a40d096ee29ec8bebe19fac3d9ce356a3e587920c71d8 +size 248990419 diff --git a/models/Vegetta777/model_info.json b/models/Vegetta777/model_info.json new file mode 100644 index 0000000000000000000000000000000000000000..4a779126927076aadcef4e9532fa15126956d87d --- /dev/null +++ b/models/Vegetta777/model_info.json @@ -0,0 +1,9 @@ +{ + "Vegetta777": { + "title": "Vegetta777", + "model_path": "Vegetta777.pth", + "feature_retrieval_library": "added_IVF2021_Flat_nprobe_1_Vegetta777_v2.index", + "model_path_checksum": 
"9bc80abfc506e98056e1db7b10000dea", + "index_path_checksum": "5a53e6f6a997e892e5fb26a7a6959550" + } +} \ No newline at end of file diff --git a/models/Villager/Villager/added_IVF81_Flat_nprobe_1_v2.index b/models/Villager/Villager/added_IVF81_Flat_nprobe_1_v2.index new file mode 100644 index 0000000000000000000000000000000000000000..a5c85b58f78306a29e0051e1833ae87a751cd8bd --- /dev/null +++ b/models/Villager/Villager/added_IVF81_Flat_nprobe_1_v2.index @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d8b8f5b445b1a1c3ee7155f3a121075399ca5b5a01032c459168b739c7c633e5 +size 10013219 diff --git a/models/Villager/Villager/villager.pth b/models/Villager/Villager/villager.pth new file mode 100644 index 0000000000000000000000000000000000000000..fdb494ec8d7baf037c7a85671f36edb96c071651 --- /dev/null +++ b/models/Villager/Villager/villager.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c2d94a1084bc9d65cd60ab7fb229a83476e79f8b75ec89e7343d0b9352ea1739 +size 55223738 diff --git a/models/Villager/model_info.json b/models/Villager/model_info.json new file mode 100644 index 0000000000000000000000000000000000000000..fcad95eda470beb5594903c7ba7a69d0218b89ba --- /dev/null +++ b/models/Villager/model_info.json @@ -0,0 +1,9 @@ +{ + "Villager": { + "title": "Villager", + "model_path": "villager.pth", + "feature_retrieval_library": "added_IVF81_Flat_nprobe_1_v2.index", + "model_path_checksum": "587b2be8f6fa60f7533dddf1a007e2fa", + "index_path_checksum": "13ef69f0f2181f77d45c4d36d7364f2b" + } +} \ No newline at end of file diff --git a/models/WalterWhite/WalterWhite/added_IVF458_Flat_nprobe_1_ww2_v2.index b/models/WalterWhite/WalterWhite/added_IVF458_Flat_nprobe_1_ww2_v2.index new file mode 100644 index 0000000000000000000000000000000000000000..fbe46fd844efe40a4f3588e24b05387dc0de51c9 --- /dev/null +++ b/models/WalterWhite/WalterWhite/added_IVF458_Flat_nprobe_1_ww2_v2.index @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c2030573a1685644ec51b816bb93194d813a30aa7e066fe27b849dc1e80f7706 +size 56527379 diff --git a/models/WalterWhite/WalterWhite/ww2.pth b/models/WalterWhite/WalterWhite/ww2.pth new file mode 100644 index 0000000000000000000000000000000000000000..bf8a9a587daa8aa96953e5d4d03bffe12ed2ca77 --- /dev/null +++ b/models/WalterWhite/WalterWhite/ww2.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:82e6f75403e2cb820df885fbebef827973494d55176d8e523f60e209d7cc03ea +size 57551491 diff --git a/models/WalterWhite/model_info.json b/models/WalterWhite/model_info.json new file mode 100644 index 0000000000000000000000000000000000000000..394bbc37497a18bfccc59045f2a390c603d8f47a --- /dev/null +++ b/models/WalterWhite/model_info.json @@ -0,0 +1,9 @@ +{ + "WalterWhite": { + "title": "WalterWhite", + "model_path": "ww2.pth", + "feature_retrieval_library": "added_IVF458_Flat_nprobe_1_ww2_v2.index", + "model_path_checksum": "dca6c0e3977e4fa61edb7e7324cf882a", + "index_path_checksum": "88108c501ab0ff5c0cc13c3d6df15374" + } +} \ No newline at end of file diff --git a/models/folder_info.json b/models/folder_info.json new file mode 100644 index 0000000000000000000000000000000000000000..9fa042322316a50cb8d393a862430acfbc8af0f3 --- /dev/null +++ b/models/folder_info.json @@ -0,0 +1,54 @@ +{ + "HolasoyGerman": { + "title": "HolasoyGerman", + "folder_path": "HolasoyGerman" + }, + "Homer": { + "title": "Homer", + "folder_path": "Homer" + }, + "Ibai": { + "title": "Ibai", + "folder_path": "Ibai" + }, + "IlloJuan": { + "title": 
"IlloJuan", + "folder_path": "IlloJuan" + }, + "Joseju": { + "title": "Joseju", + "folder_path": "Joseju" + }, + "Pavloh": { + "title": "Pavloh", + "folder_path": "Pavloh" + }, + "Quevedo": { + "title": "Quevedo", + "folder_path": "Quevedo" + }, + "Shadoune666": { + "title": "Shadoune666", + "folder_path": "Shadoune666" + }, + "Spreen": { + "title": "Spreen", + "folder_path": "Spreen" + }, + "Totakeke": { + "title": "Totakeke", + "folder_path": "Totakeke" + }, + "Vegetta777": { + "title": "Vegetta777", + "folder_path": "Vegetta777" + }, + "Villager": { + "title": "Villager", + "folder_path": "Villager" + }, + "WalterWhite": { + "title": "WalterWhite", + "folder_path": "WalterWhite" + } +} \ No newline at end of file diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..f92047aedaa2077a82279ce1b2fea77921258a9d --- /dev/null +++ b/requirements.txt @@ -0,0 +1,25 @@ +wheel +PyNaCl +demucs +ffmpeg +asyncio +discord +requests +edge-tts +setuptools +torchcrepe +discord.py +onnxruntime +tensorboard +configparser +tensorboardX +scipy==1.9.3 +httpx==0.23.0 +numba==0.56.4 +numpy==1.23.5 +librosa==0.9.1 +fairseq==0.12.2 +faiss-cpu==1.7.3 +pyworld>=0.3.2 +soundfile>=0.12.1 +praat-parselmouth>=0.4.2 \ No newline at end of file diff --git a/vc_infer_pipeline.py b/vc_infer_pipeline.py new file mode 100644 index 0000000000000000000000000000000000000000..2e5d17bdc522e3d2757f2accd17258994b40e613 --- /dev/null +++ b/vc_infer_pipeline.py @@ -0,0 +1,230 @@ +from scipy import signal +from functools import lru_cache +import torch.nn.functional as F +import numpy as np, parselmouth, torch +import pyworld, os, traceback, faiss, librosa + +bh, ah = signal.butter(N=5, Wn=48, btype="high", fs=16000) + +@lru_cache +def cache_harvest_f0(input_audio_path, fs, f0max, f0min, frame_period, input_audio_path2wav): + audio = input_audio_path2wav[input_audio_path] + f0, t = pyworld.harvest( + audio, fs=fs, f0_ceil=f0max, f0_floor=f0min, frame_period=frame_period + ) + f0 = pyworld.stonemask(audio, f0, t, fs) + return f0 + +def change_rms(data1, sr1, data2, sr2, rate): + rms1 = librosa.feature.rms(y=data1, frame_length=sr1 // 2 * 2, hop_length=sr1 // 2) + rms2 = librosa.feature.rms(y=data2, frame_length=sr2 // 2 * 2, hop_length=sr2 // 2) + rms1 = torch.from_numpy(rms1).unsqueeze(0) + rms2 = torch.from_numpy(rms2).unsqueeze(0) + rms1 = F.interpolate(rms1, size=data2.shape[0], mode="linear").squeeze() + rms2 = F.interpolate(rms2, size=data2.shape[0], mode="linear").squeeze() + rms2 = torch.max(rms2, torch.zeros_like(rms2) + 1e-6) + data2 *= (torch.pow(rms1, 1 - rate) * torch.pow(rms2, rate - 1)).numpy() + + return data2 + +class VC: + def __init__(self, tgt_sr, config): + self.x_pad = config.x_pad + self.x_query = config.x_query + self.x_center = config.x_center + self.x_max = config.x_max + self.is_half = config.is_half + self.sr = 16000 + self.window = 160 + self.t_pad = self.sr * self.x_pad + self.t_pad_tgt = tgt_sr * self.x_pad + self.t_pad2 = self.t_pad * 2 + self.t_query = self.sr * self.x_query + self.t_center = self.sr * self.x_center + self.t_max = self.sr * self.x_max + self.device = config.device + + def get_f0(self, input_audio_path, x, p_len, f0_up_key, f0_method, filter_radius, inp_f0=None): + global input_audio_path2wav + time_step = self.window / self.sr * 1000 + f0_min = 50 + f0_max = 1100 + f0_mel_min = 1127 * np.log(1 + f0_min / 700) + f0_mel_max = 1127 * np.log(1 + f0_max / 700) + + if f0_method == "pm": + f0 = (parselmouth.Sound(x, self.sr) + 
+class VC:
+    def __init__(self, tgt_sr, config):
+        self.x_pad = config.x_pad
+        self.x_query = config.x_query
+        self.x_center = config.x_center
+        self.x_max = config.x_max
+        self.is_half = config.is_half
+        self.sr = 16000  # feature extraction always runs at 16 kHz
+        self.window = 160  # hop size: 10 ms per feature frame at 16 kHz
+        self.t_pad = self.sr * self.x_pad
+        self.t_pad_tgt = tgt_sr * self.x_pad
+        self.t_pad2 = self.t_pad * 2
+        self.t_query = self.sr * self.x_query
+        self.t_center = self.sr * self.x_center
+        self.t_max = self.sr * self.x_max
+        self.device = config.device
+
+    def get_f0(self, input_audio_path, x, p_len, f0_up_key, f0_method, filter_radius, inp_f0=None):
+        global input_audio_path2wav
+        time_step = self.window / self.sr * 1000
+        f0_min = 50
+        f0_max = 1100
+        f0_mel_min = 1127 * np.log(1 + f0_min / 700)
+        f0_mel_max = 1127 * np.log(1 + f0_max / 700)
+
+        if f0_method == "pm":
+            f0 = (
+                parselmouth.Sound(x, self.sr)
+                .to_pitch_ac(
+                    time_step=time_step / 1000,
+                    voicing_threshold=0.6,
+                    pitch_floor=f0_min,
+                    pitch_ceiling=f0_max,
+                )
+                .selected_array["frequency"]
+            )
+            pad_size = (p_len - len(f0) + 1) // 2
+            if pad_size > 0 or p_len - len(f0) - pad_size > 0:
+                f0 = np.pad(f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant")
+        elif f0_method == "harvest":
+            # Restored from upstream RVC: without this branch f0 would be
+            # undefined (NameError) for any method other than "pm", and
+            # cache_harvest_f0/filter_radius would be dead code.
+            input_audio_path2wav[input_audio_path] = x.astype(np.double)
+            f0 = cache_harvest_f0(input_audio_path, self.sr, f0_max, f0_min, 10)
+            if filter_radius > 2:
+                f0 = signal.medfilt(f0, 3)
+        f0 *= pow(2, f0_up_key / 12)  # shift pitch by f0_up_key semitones
+        tf0 = self.sr // self.window  # feature frames per second
+
+        if inp_f0 is not None:
+            # Splice a user-supplied F0 curve over the extracted one.
+            delta_t = np.round(
+                (inp_f0[:, 0].max() - inp_f0[:, 0].min()) * tf0 + 1
+            ).astype("int16")
+            replace_f0 = np.interp(list(range(delta_t)), inp_f0[:, 0] * 100, inp_f0[:, 1])
+            shape = f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)].shape[0]
+            f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)] = replace_f0[:shape]
+
+        f0bak = f0.copy()
+        # Quantize F0 onto a 1..255 mel-spaced grid for the coarse pitch embedding.
+        f0_mel = 1127 * np.log(1 + f0 / 700)
+        f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (f0_mel_max - f0_mel_min) + 1
+        f0_mel[f0_mel <= 1] = 1
+        f0_mel[f0_mel > 255] = 255
+        f0_coarse = np.rint(f0_mel).astype(int)  # np.int is deprecated in NumPy
+        return f0_coarse, f0bak
+
+    def vc(self, model, net_g, sid, audio0, pitch, pitchf, times, index, big_npy, index_rate, version, protect):
+        feats = torch.from_numpy(audio0)
+        feats = feats.half() if self.is_half else feats.float()
+
+        if feats.dim() == 2:  # stereo -> mono
+            feats = feats.mean(-1)
+        assert feats.dim() == 1, feats.dim()
+        feats = feats.view(1, -1)
+        padding_mask = torch.BoolTensor(feats.shape).to(self.device).fill_(False)
+
+        inputs = {
+            "source": feats.to(self.device),
+            "padding_mask": padding_mask,
+            "output_layer": 9 if version == "v1" else 12,
+        }
+
+        with torch.no_grad():
+            logits = model.extract_features(**inputs)
+            feats = model.final_proj(logits[0]) if version == "v1" else logits[0]
+
+        if protect < 0.5 and pitch is not None and pitchf is not None:
+            feats0 = feats.clone()  # keep un-retrieved features for consonant protection
+
+        if index is not None and big_npy is not None and index_rate != 0:
+            npy = feats[0].cpu().numpy()
+            if self.is_half:
+                npy = npy.astype("float32")  # faiss expects float32 ("float64" here was a bug)
+
+            # Replace each frame with a distance-weighted mix of its 8 nearest
+            # training features, then blend with the original by index_rate.
+            score, ix = index.search(npy, k=8)
+            weight = np.square(1 / score)
+            weight /= weight.sum(axis=1, keepdims=True)
+            npy = np.sum(big_npy[ix] * np.expand_dims(weight, axis=2), axis=1)
+
+            if self.is_half:
+                npy = npy.astype("float16")
+            feats = torch.from_numpy(npy).unsqueeze(0).to(self.device) * index_rate + (1 - index_rate) * feats
+
+        # Upsample features 2x in time to match the generator's frame rate.
+        feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1)
+        if protect < 0.5 and pitch is not None and pitchf is not None:
+            feats0 = F.interpolate(feats0.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1)
+
+        p_len = audio0.shape[0] // self.window
+        if feats.shape[1] < p_len:
+            p_len = feats.shape[1]
+            if pitch is not None and pitchf is not None:
+                pitch = pitch[:, :p_len]
+                pitchf = pitchf[:, :p_len]
+
+        if protect < 0.5 and pitch is not None and pitchf is not None:
+            # Where F0 is unvoiced (consonants, breaths), lean back toward the
+            # original features by the protect factor to reduce artifacts.
+            pitchff = pitchf.clone()
+            pitchff[pitchf > 0] = 1
+            pitchff[pitchf < 1] = protect
+            pitchff = pitchff.unsqueeze(-1)
+            feats = feats * pitchff + feats0 * (1 - pitchff)
+            feats = feats.to(feats0.dtype)
+        p_len = torch.tensor([p_len], device=self.device).long()
+
+        with torch.no_grad():
+            if pitch is not None and pitchf is not None:
+                audio1 = (
+                    (net_g.infer(feats, p_len, pitch, pitchf, sid)[0][0, 0])
+                    .data.cpu()
+                    .float()
+                    .numpy()
+                )
+            else:
+                audio1 = (net_g.infer(feats, p_len, sid)[0][0, 0]).data.cpu().float().numpy()
+
+        del feats, p_len, padding_mask
+        return audio1
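# --- Editor's aside (not part of vc_infer_pipeline.py): the retrieval step in
# --- vc() above is ordinary faiss k-NN with inverse-square-distance weights.
# --- The same math on synthetic data; shapes and values are illustrative only:
import faiss
import numpy as np

dim, n_train, n_frames = 768, 1000, 50
big_npy = np.random.rand(n_train, dim).astype("float32")  # stand-in training features
feats = np.random.rand(n_frames, dim).astype("float32")   # stand-in frames to convert
index_rate = 0.75

index = faiss.IndexFlatL2(dim)
index.add(big_npy)

score, ix = index.search(feats, k=8)   # squared L2 distances and neighbor ids
weight = np.square(1 / score)          # closer neighbors get larger weights
weight /= weight.sum(axis=1, keepdims=True)
retrieved = np.sum(big_npy[ix] * weight[:, :, None], axis=1)

# Blend toward the retrieved features by index_rate, exactly as vc() does.
blended = retrieved * index_rate + (1 - index_rate) * feats
print(blended.shape)  # (50, 768)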
+    def pipeline(self, model, net_g, sid, audio, input_audio_path, times, f0_up_key, f0_method,
+                 file_index, index_rate, if_f0, filter_radius, tgt_sr, resample_sr, rms_mix_rate,
+                 version, protect, f0_file=None):
+        if file_index != "" and os.path.exists(file_index) and index_rate != 0:
+            try:
+                index = faiss.read_index(file_index)
+                big_npy = index.reconstruct_n(0, index.ntotal)
+            except Exception:
+                traceback.print_exc()
+                index = big_npy = None
+        else:
+            index = big_npy = None
+        # High-pass the input, then find low-energy points at which long audio
+        # can be cut into chunks so each piece fits in memory.
+        audio = signal.filtfilt(bh, ah, audio)
+        audio_pad = np.pad(audio, (self.window // 2, self.window // 2), mode="reflect")
+        opt_ts = []
+        if audio_pad.shape[0] > self.t_max:
+            audio_sum = np.zeros_like(audio)
+            for i in range(self.window):
+                audio_sum += audio_pad[i : i - self.window]
+            for t in range(self.t_center, audio.shape[0], self.t_center):
+                # Cut at the quietest sample within +-t_query of each center point.
+                opt_ts.append(
+                    t
+                    - self.t_query
+                    + np.where(
+                        np.abs(audio_sum[t - self.t_query : t + self.t_query])
+                        == np.abs(audio_sum[t - self.t_query : t + self.t_query]).min()
+                    )[0][0]
+                )
+        s = 0
+        audio_opt = []
+        t = None
+        audio_pad = np.pad(audio, (self.t_pad, self.t_pad), mode="reflect")
+        p_len = audio_pad.shape[0] // self.window
+        inp_f0 = None
+        if hasattr(f0_file, "name"):
+            try:
+                with open(f0_file.name, "r") as f:
+                    lines = f.read().strip("\n").split("\n")
+                inp_f0 = []
+                for line in lines:
+                    inp_f0.append([float(i) for i in line.split(",")])
+                inp_f0 = np.array(inp_f0, dtype="float64")
+            except Exception:
+                traceback.print_exc()
+        sid = torch.tensor(sid, device=self.device).unsqueeze(0).long()
+        pitch, pitchf = None, None
+        if if_f0 == 1:
+            pitch, pitchf = self.get_f0(
+                input_audio_path, audio_pad, p_len, f0_up_key, f0_method, filter_radius, inp_f0,
+            )
+            pitch = pitch[:p_len]
+            pitchf = pitchf[:p_len]
+            pitch = torch.tensor(pitch, device=self.device).unsqueeze(0).long()
+            pitchf = torch.tensor(pitchf, device=self.device).unsqueeze(0).float()
+        for t in opt_ts:
+            t = t // self.window * self.window  # snap cut points to frame boundaries
+            if if_f0 == 1:
+                audio_opt.append(
+                    self.vc(
+                        model, net_g, sid, audio_pad[s : t + self.t_pad2 + self.window],
+                        pitch[:, s // self.window : (t + self.t_pad2) // self.window],
+                        pitchf[:, s // self.window : (t + self.t_pad2) // self.window],
+                        times, index, big_npy, index_rate, version, protect,
+                    )[self.t_pad_tgt : -self.t_pad_tgt]
+                )
+            else:
+                audio_opt.append(
+                    self.vc(
+                        model, net_g, sid, audio_pad[s : t + self.t_pad2 + self.window],
+                        None, None, times, index, big_npy, index_rate, version, protect,
+                    )[self.t_pad_tgt : -self.t_pad_tgt]
+                )
+            s = t
+        # Final (or only) chunk; t is None when the audio was never split.
+        if if_f0 == 1:
+            audio_opt.append(
+                self.vc(
+                    model, net_g, sid, audio_pad[t:],
+                    pitch[:, t // self.window :] if t is not None else pitch,
+                    pitchf[:, t // self.window :] if t is not None else pitchf,
+                    times, index, big_npy, index_rate, version, protect,
+                )[self.t_pad_tgt : -self.t_pad_tgt]
+            )
+        else:
+            audio_opt.append(
+                self.vc(
+                    model, net_g, sid, audio_pad[t:], None, None,
+                    times, index, big_npy, index_rate, version, protect,
+                )[self.t_pad_tgt : -self.t_pad_tgt]
+            )
+        audio_opt = np.concatenate(audio_opt)
+        if rms_mix_rate != 1:
+            audio_opt = change_rms(audio, 16000, audio_opt, tgt_sr, rms_mix_rate)
+        if resample_sr >= 16000 and tgt_sr != resample_sr:
+            audio_opt = librosa.resample(audio_opt, orig_sr=tgt_sr, target_sr=resample_sr)
+        # Scale down if the peak would clip, then convert to 16-bit PCM.
+        audio_max = np.abs(audio_opt).max() / 0.99
+        max_int16 = 32768
+        if audio_max > 1:
+            max_int16 /= audio_max
+        audio_opt = (audio_opt * max_int16).astype(np.int16)
+        del pitch, pitchf, sid
+        if torch.cuda.is_available():
+            torch.cuda.empty_cache()
+        return audio_opt
\ No newline at end of file
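For orientation, a hedged sketch of how an app might drive VC.pipeline end to end. The Config stand-in, its padding values, and the loading of the HuBERT model and RVC generator are assumptions for illustration; the Space's real wiring lives in app.py and is not shown in this diff.

import librosa
import torch

from vc_infer_pipeline import VC

class Config:
    # Stand-in for the app's real config; these RVC-style padding/query values
    # are illustrative assumptions, not the Space's exact settings.
    x_pad, x_query, x_center, x_max = 1, 6, 38, 41
    is_half = False
    device = "cuda" if torch.cuda.is_available() else "cpu"

tgt_sr = 40000               # sample rate the voice model was trained at (assumed)
vc = VC(tgt_sr, Config())

audio, _ = librosa.load("input.wav", sr=16000)  # the pipeline expects 16 kHz mono float

# model (HuBERT feature extractor) and net_g (RVC generator) must be loaded
# separately; sid=0 selects the speaker, if_f0=1 enables pitch conditioning,
# and "pm" picks the parselmouth F0 estimator implemented in get_f0 above.
# out = vc.pipeline(model, net_g, 0, audio, "input.wav", [], 0, "pm",
#                   "models/Villager/Villager/added_IVF81_Flat_nprobe_1_v2.index",
#                   0.75, 1, 3, tgt_sr, 0, 0.25, "v2", 0.33, f0_file=None)
# `out` is int16 PCM at tgt_sr (resample_sr=0 skips resampling).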