golem4300 and ImPavloh committed
Commit 0ce8c7d
0 Parent(s)

Duplicate from ImPavloh/RVC-TTS-Demo


Co-authored-by: Pablo <ImPavloh@users.noreply.huggingface.co>

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. .gitattributes +48 -0
  2. README.md +14 -0
  3. app.py +200 -0
  4. config.py +31 -0
  5. hubert_base.pt +3 -0
  6. lib/infer_pack/__pycache__/attentions.cpython-310.pyc +0 -0
  7. lib/infer_pack/__pycache__/commons.cpython-310.pyc +0 -0
  8. lib/infer_pack/__pycache__/models.cpython-310.pyc +0 -0
  9. lib/infer_pack/__pycache__/modules.cpython-310.pyc +0 -0
  10. lib/infer_pack/__pycache__/transforms.cpython-310.pyc +0 -0
  11. lib/infer_pack/attentions.py +253 -0
  12. lib/infer_pack/commons.py +112 -0
  13. lib/infer_pack/models.py +711 -0
  14. lib/infer_pack/models_onnx.py +582 -0
  15. lib/infer_pack/modules.py +315 -0
  16. lib/infer_pack/onnx_inference.py +67 -0
  17. lib/infer_pack/transforms.py +115 -0
  18. models/FernandoAlonso/FernandoAlonso/added_IVF582_Flat_nprobe_1_fernando2_v2.index +3 -0
  19. models/FernandoAlonso/FernandoAlonso/fernando2.pth +3 -0
  20. models/HolasoyGerman/HolasoyGerman/HolasoyGerman.pth +3 -0
  21. models/HolasoyGerman/HolasoyGerman/added_IVF3117_Flat_nprobe_1_HolasoyGerman_v2.index +3 -0
  22. models/HolasoyGerman/model_info.json +9 -0
  23. models/Homer/Homer/HomerEsp.pth +3 -0
  24. models/Homer/Homer/added_IVF360_Flat_nprobe_1_HomerEsp_v1.index +3 -0
  25. models/Homer/model_info.json +9 -0
  26. models/Ibai/Ibai/Ibai.pth +3 -0
  27. models/Ibai/Ibai/added_IVF4601_Flat_nprobe_1_Ibai_v2.index +3 -0
  28. models/Ibai/model_info.json +9 -0
  29. models/IlloJuan/IlloJuan/IlloJuan.pth +3 -0
  30. models/IlloJuan/IlloJuan/added_IVF593_Flat_nprobe_1_IlloJuan_v2.index +3 -0
  31. models/IlloJuan/model_info.json +9 -0
  32. models/Quevedo/Quevedo/added_IVF2301_Flat_nprobe_10.index +3 -0
  33. models/Quevedo/Quevedo/quevedo.pth +3 -0
  34. models/Quevedo/model_info.json +9 -0
  35. models/Shadoune666/Shadoune666/Shadoune666.pth +3 -0
  36. models/Shadoune666/Shadoune666/added_IVF716_Flat_nprobe_1_Shadoune666_v2.index +3 -0
  37. models/Shadoune666/model_info.json +9 -0
  38. models/Spreen/Spreen/Spreen.pth +3 -0
  39. models/Spreen/Spreen/added_IVF4737_Flat_nprobe_1_Spreen_v2.index +3 -0
  40. models/Spreen/model_info.json +9 -0
  41. models/Totakeke/Totakeke/added_IVF256_Flat_nprobe_1_full_totakeke_v2.index +3 -0
  42. models/Totakeke/Totakeke/full_totakeke_e150_s44400.pth +3 -0
  43. models/Totakeke/model_info.json +9 -0
  44. models/Vegetta777/Vegetta777/Vegetta777.pth +3 -0
  45. models/Vegetta777/Vegetta777/added_IVF2021_Flat_nprobe_1_Vegetta777_v2.index +3 -0
  46. models/Vegetta777/model_info.json +9 -0
  47. models/Villager/Villager/added_IVF81_Flat_nprobe_1_v2.index +3 -0
  48. models/Villager/Villager/villager.pth +3 -0
  49. models/Villager/model_info.json +9 -0
  50. models/WalterWhite/WalterWhite/added_IVF458_Flat_nprobe_1_ww2_v2.index +3 -0
.gitattributes ADDED
@@ -0,0 +1,48 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
+ models/HolasoyGerman/HolasoyGerman/added_IVF3117_Flat_nprobe_1_HolasoyGerman_v2.index filter=lfs diff=lfs merge=lfs -text
+ models/Homer/Homer/added_IVF360_Flat_nprobe_1_HomerEsp_v1.index filter=lfs diff=lfs merge=lfs -text
+ models/Ibai/Ibai/added_IVF4601_Flat_nprobe_1_Ibai_v2.index filter=lfs diff=lfs merge=lfs -text
+ models/IlloJuan/IlloJuan/added_IVF593_Flat_nprobe_1_IlloJuan_v2.index filter=lfs diff=lfs merge=lfs -text
+ models/Joseju/Joseju/added_IVF256_Flat_nprobe_1_Joseju_v2.index filter=lfs diff=lfs merge=lfs -text
+ models/Quevedo/Quevedo/added_IVF2301_Flat_nprobe_10.index filter=lfs diff=lfs merge=lfs -text
+ models/Shadoune666/Shadoune666/added_IVF716_Flat_nprobe_1_Shadoune666_v2.index filter=lfs diff=lfs merge=lfs -text
+ models/Spreen/Spreen/added_IVF4737_Flat_nprobe_1_Spreen_v2.index filter=lfs diff=lfs merge=lfs -text
+ models/Totakeke/Totakeke/added_IVF256_Flat_nprobe_1_full_totakeke_v2.index filter=lfs diff=lfs merge=lfs -text
+ models/Vegetta777/Vegetta777/added_IVF2021_Flat_nprobe_1_Vegetta777_v2.index filter=lfs diff=lfs merge=lfs -text
+ models/Villager/Villager/added_IVF81_Flat_nprobe_1_v2.index filter=lfs diff=lfs merge=lfs -text
+ models/WalterWhite/WalterWhite/added_IVF458_Flat_nprobe_1_ww2_v2.index filter=lfs diff=lfs merge=lfs -text
+ models/FernandoAlonso/FernandoAlonso/added_IVF582_Flat_nprobe_1_fernando2_v2.index filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,14 @@
+ ---
+ title: RVC TTS Demo
+ emoji: 🚀
+ colorFrom: red
+ colorTo: pink
+ sdk: gradio
+ sdk_version: 3.36.1
+ app_file: app.py
+ pinned: false
+ license: gpl-3.0
+ duplicated_from: ImPavloh/RVC-TTS-Demo
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,200 @@
+ import os
+ import json
+ import torch
+ import asyncio
+ import librosa
+ import hashlib
+ import edge_tts
+ import gradio as gr
+ from config import Config
+ from vc_infer_pipeline import VC
+ from fairseq import checkpoint_utils
+ from lib.infer_pack.models import (SynthesizerTrnMs256NSFsid, SynthesizerTrnMs256NSFsid_nono, SynthesizerTrnMs768NSFsid, SynthesizerTrnMs768NSFsid_nono)
+
+ config = Config()
+
+ def load_json_file(filepath):
+     with open(filepath, "r", encoding="utf-8") as f: content = json.load(f)
+     return content
+
+ def file_checksum(file_path):
+     with open(file_path, 'rb') as f:
+         file_data = f.read()
+     return hashlib.md5(file_data).hexdigest()
+
+ def get_existing_model_info(category_directory):
+     model_info_path = os.path.join(category_directory, 'model_info.json')
+     if os.path.exists(model_info_path):
+         with open(model_info_path, 'r') as f: return json.load(f)
+     return None
+
+ def generate_model_info_files():
+     folder_info = {}
+     model_directory = "models/"
+     for category_name in os.listdir(model_directory):
+         category_directory = os.path.join(model_directory, category_name)
+         if not os.path.isdir(category_directory): continue
+
+         folder_info[category_name] = {"title": category_name, "folder_path": category_name}
+         existing_model_info = get_existing_model_info(category_directory)
+         model_info = {}
+         regenerate_model_info = False
+
+         for model_name in os.listdir(category_directory):
+             model_path = os.path.join(category_directory, model_name)
+             if not os.path.isdir(model_path): continue
+
+             model_data, regenerate = gather_model_info(category_directory, model_name, model_path, existing_model_info)
+             if model_data is not None:
+                 model_info[model_name] = model_data
+                 regenerate_model_info |= regenerate
+
+         if regenerate_model_info:
+             with open(os.path.join(category_directory, 'model_info.json'), 'w') as f: json.dump(model_info, f, indent=4)
+
+     folder_info_path = os.path.join(model_directory, 'folder_info.json')
+     with open(folder_info_path, 'w') as f: json.dump(folder_info, f, indent=4)
+
+ def should_regenerate_model_info(existing_model_info, model_name, pth_checksum, index_checksum):
+     if existing_model_info is None or model_name not in existing_model_info: return True
+     return (existing_model_info[model_name]['model_path_checksum'] != pth_checksum or existing_model_info[model_name]['index_path_checksum'] != index_checksum)
+
+ def get_model_files(model_path): return [f for f in os.listdir(model_path) if f.endswith('.pth') or f.endswith('.index')]
+
+ def gather_model_info(category_directory, model_name, model_path, existing_model_info):
+     model_files = get_model_files(model_path)
+     if len(model_files) != 2: return None, False
+
+     pth_file = [f for f in model_files if f.endswith('.pth')][0]
+     index_file = [f for f in model_files if f.endswith('.index')][0]
+     pth_checksum = file_checksum(os.path.join(model_path, pth_file))
+     index_checksum = file_checksum(os.path.join(model_path, index_file))
+     regenerate = should_regenerate_model_info(existing_model_info, model_name, pth_checksum, index_checksum)
+
+     return {"title": model_name, "model_path": pth_file, "feature_retrieval_library": index_file, "model_path_checksum": pth_checksum, "index_path_checksum": index_checksum}, regenerate
+
+ def create_vc_fn(model_name, tgt_sr, net_g, vc, if_f0, version, file_index):
+     def vc_fn(tts_text, tts_voice):
+         try:
+             if tts_text is None or tts_voice is None: return None
+             if len(tts_text) > 100: return None
+             asyncio.run(edge_tts.Communicate(tts_text, "-".join(tts_voice.split('-')[:-1])).save("tts.mp3"))
+             audio, sr = librosa.load("tts.mp3", sr=16000, mono=True)
+             vc_input = "tts.mp3"
+             times = [0, 0, 0]
+             audio_opt = vc.pipeline(hubert_model, net_g, 0, audio, vc_input, times, 0, "pm", file_index, 0.7, if_f0, 3, tgt_sr, 0, 1, version, 0.5, f0_file=None)
+             return (tgt_sr, audio_opt)
+         except Exception: return None
+     return vc_fn
+
+ def load_model_parameters(category_folder, character_name, info):
+     model_index = f"models/{category_folder}/{character_name}/{info['feature_retrieval_library']}"
+     cpt = torch.load(f"models/{category_folder}/{character_name}/{info['model_path']}", map_location="cpu")
+     return model_index, cpt
+
+ def select_net_g(cpt, version, if_f0):
+     if version == "v1":
+         if if_f0 == 1: net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=config.is_half)
+         else: net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
+     elif version == "v2":
+         if if_f0 == 1: net_g = SynthesizerTrnMs768NSFsid(*cpt["config"], is_half=config.is_half)
+         else: net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"])
+     return net_g
+
+ def load_model_and_prepare(cpt, net_g):
+     del net_g.enc_q
+     net_g.load_state_dict(cpt["weight"], strict=False)
+     net_g.eval().to(config.device)
+     net_g = net_g.half() if config.is_half else net_g.float()
+     return net_g
+
+ def create_and_append_model(models, model_functions, character_name, model_title, version, vc_fn):
+     models.append((character_name, model_title, version, vc_fn))
+     model_functions[character_name] = vc_fn
+     return models, model_functions
+
+ def load_model():
+     categories = []
+     model_functions = {}
+     folder_info = load_json_file("models/folder_info.json")
+     for category_name, category_info in folder_info.items():
+         models = []
+         models_info = load_json_file(f"models/{category_info['folder_path']}/model_info.json")
+         for character_name, info in models_info.items():
+             model_index, cpt = load_model_parameters(category_info['folder_path'], character_name, info)
+             cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0]  # sync speaker-embedding size with the checkpoint before building the net
+             net_g = select_net_g(cpt, cpt.get("version", "v1"), cpt.get("f0", 1))
+             net_g = load_model_and_prepare(cpt, net_g)
+             vc = VC(cpt["config"][-1], config)
+             vc_fn = create_vc_fn(info['model_path'], cpt["config"][-1], net_g, vc, cpt.get("f0", 1), cpt.get("version", "v1"), model_index)
+             models, model_functions = create_and_append_model(models, model_functions, character_name, info['title'], cpt.get("version", "v1"), vc_fn)
+         categories.append([category_info['title'], category_info['folder_path'], models])
+     return categories, model_functions
+
+ generate_model_info_files()
+
+ css = """
+ .gradio-container { font-family: 'IBM Plex Sans', sans-serif; }
+ footer { visibility: hidden; display: none; }
+ .center-container { display: flex; flex-direction: column; align-items: center; justify-content: center;}
+ """
+
+ if __name__ == '__main__':
+     models, _, _ = checkpoint_utils.load_model_ensemble_and_task(["hubert_base.pt"], suffix="")
+     hubert_model = models[0]
+     hubert_model = hubert_model.to(config.device)
+     hubert_model = hubert_model.half() if config.is_half else hubert_model.float()
+     hubert_model.eval()
+     categories, model_functions = load_model()
+     tts_voice_list = asyncio.get_event_loop().run_until_complete(edge_tts.list_voices())
+     voices = [f"{v['ShortName']}-{v['Gender']}" for v in tts_voice_list]
+     with gr.Blocks(css=css, title="Demo RVC TTS - Pavloh", theme=gr.themes.Soft(primary_hue="cyan", secondary_hue="blue", radius_size="lg", text_size="lg")
+                    .set(loader_color="#0B0F19", shadow_drop='*shadow_drop_lg', block_border_width="3px")) as pavloh:
+         gr.HTML("""
+         <div class="center-container">
+             <div style="display: flex; justify-content: center;">
+                 <a href="https://github.com/ImPavloh/rvc-tts/blob/main/LICENSE" target="_blank">
+                     <img src="https://img.shields.io/github/license/impavloh/voiceit?style=for-the-badge&logo=github&logoColor=white" alt="License">
+                 </a>
+                 <a href="https://github.com/ImPavloh/rvc-tts" target="_blank">
+                     <img src="https://img.shields.io/badge/repository-%23121011.svg?style=for-the-badge&logo=github&logoColor=white" alt="GitHub">
+                 </a>
+                 <form action="https://www.paypal.com/donate" method="post" target="_blank">
+                     <input type="hidden" name="hosted_button_id" value="6FPWP9AWEKSWJ" />
+                     <input type="image" src="https://img.shields.io/badge/support-%2300457C.svg?style=for-the-badge&logo=paypal&logoColor=white" border="0" name="submit" alt="Donate with PayPal" />
+                     <img alt="" border="0" src="https://www.paypal.com/es_ES/i/scr/pixel.gif" width="1" height="1" />
+                 </form>
+                 <a href="https://twitter.com/impavloh" target="_blank">
+                     <img src="https://img.shields.io/badge/follow-%231DA1F2.svg?style=for-the-badge&logo=twitter&logoColor=white" alt="Twitter">
+                 </a>
+             </div>
+             <div style="display: inline-flex; align-items: center; gap: 0.8rem; font-size: 1.75rem;">
+                 <h1 style="font-weight: 900; margin-bottom: 7px; margin-top: 5px">🗣️ RVC TTS Demo - <a style="text-decoration: underline;" href="https://twitter.com/impavloh">Pavloh</a></h1>
+             </div>
+             <p style="margin-bottom: 10px; font-size: 94%; line-height: 23px;">An AI-powered text-to-speech demo</p>
+             <p><b>Try out the <a style="text-decoration: underline;" href="https://github.com/ImPavloh/rvc-tts-discord-bot">RVC Text-to-Speech Discord Bot</a></b></p>
+         </div>
+         """)
+
+         with gr.Row():
+             with gr.Column():
+                 m1 = gr.Dropdown(label="📦 Voice Model", choices=list(model_functions.keys()), allow_custom_value=False, value="Ibai")
+                 t2 = gr.Dropdown(label="⚙️ Voice style and language [Edge-TTS]", choices=voices, allow_custom_value=False, value="es-ES-AlvaroNeural-Male")
+                 t1 = gr.Textbox(label="📝 Text to convert")
+                 c1 = gr.Button("Convert", variant="primary")
+                 a1 = gr.Audio(label="🔉 Converted Text", interactive=False)
+
+         def call_selected_model_fn(selected_model, t1, t2):
+             vc_fn = model_functions[selected_model]
+             return vc_fn(t1, t2)
+
+         c1.click(fn=call_selected_model_fn, inputs=[m1, t1, t2], outputs=[a1])
+
+         gr.HTML("""
+         <center>
+             <p><i> By using this website, you agree to the <a style="text-decoration: underline;" href="https://github.com/ImPavloh/rvc-tts/blob/main/LICENSE">license</a>. </i></p>
+         </center>
+         """)
+
+     pavloh.queue(concurrency_count=1).launch()
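
For context, vc_fn above first renders the text with edge-tts and only then runs RVC conversion on the resulting MP3. A minimal sketch of that first step, using the same edge_tts API the file imports (the voice name here is just an illustrative choice):

    import asyncio
    import edge_tts

    async def tts_to_file(text: str, voice: str = "es-ES-AlvaroNeural", out_path: str = "tts.mp3"):
        # Communicate streams the synthesized speech; save() writes the MP3 to disk.
        await edge_tts.Communicate(text, voice).save(out_path)

    asyncio.run(tts_to_file("Hola, esto es una prueba."))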
config.py ADDED
@@ -0,0 +1,31 @@
+ import argparse
+ from multiprocessing import cpu_count
+
+ class Config:
+     def __init__(self):
+         self.device = "cpu"
+         self.is_half = False
+         self.n_cpu = cpu_count()
+         (self.python_cmd, self.colab, self.noparallel, self.noautoopen, self.api) = self.arg_parse()
+         self.listen_port = 7860
+         self.x_pad, self.x_query, self.x_center, self.x_max = self.device_config()
+
+     @staticmethod
+     def arg_parse() -> tuple:
+         parser = argparse.ArgumentParser()
+         parser.add_argument("--pycmd", type=str, default="python")
+         parser.add_argument("--colab", action="store_true")
+         parser.add_argument("--noparallel", action="store_true")
+         parser.add_argument("--noautoopen", action="store_true")
+         parser.add_argument("--api", action="store_true")
+         cmd_opts = parser.parse_args()
+
+         return (cmd_opts.pycmd, cmd_opts.colab, cmd_opts.noparallel, cmd_opts.noautoopen, cmd_opts.api)
+
+     def device_config(self) -> tuple:
+         x_pad = 1
+         x_query = 6
+         x_center = 38
+         x_max = 41
+
+         return x_pad, x_query, x_center, x_max
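
Config is consumed once at startup in app.py; a quick sketch of what the defaults above yield (the flags come from sys.argv, so launching with, say, --api would set cfg.api to True):

    from config import Config

    cfg = Config()  # parses --pycmd/--colab/--noparallel/--noautoopen/--api
    print(cfg.device, cfg.is_half, cfg.listen_port)         # cpu False 7860
    print(cfg.x_pad, cfg.x_query, cfg.x_center, cfg.x_max)  # 1 6 38 41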
hubert_base.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f54b40fd2802423a5643779c4861af1e9ee9c1564dc9d32f54f20b5ffba7db96
+ size 189507909
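
The three lines above are a Git LFS pointer, so the real hubert_base.pt weights are fetched by LFS (per the *.pt rule in .gitattributes) rather than stored in the repo. A hedged sketch for verifying a downloaded copy against the pointer's sha256 oid:

    import hashlib

    def sha256_of(path: str) -> str:
        # Hash the file in 1 MiB chunks to avoid loading ~190 MB into memory at once.
        h = hashlib.sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                h.update(chunk)
        return h.hexdigest()

    assert sha256_of("hubert_base.pt") == "f54b40fd2802423a5643779c4861af1e9ee9c1564dc9d32f54f20b5ffba7db96"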
lib/infer_pack/__pycache__/attentions.cpython-310.pyc ADDED
Binary file (9.24 kB).
 
lib/infer_pack/__pycache__/commons.cpython-310.pyc ADDED
Binary file (5.08 kB).
 
lib/infer_pack/__pycache__/models.cpython-310.pyc ADDED
Binary file (19.5 kB).
 
lib/infer_pack/__pycache__/modules.cpython-310.pyc ADDED
Binary file (11.4 kB).
 
lib/infer_pack/__pycache__/transforms.cpython-310.pyc ADDED
Binary file (4.35 kB).
 
lib/infer_pack/attentions.py ADDED
@@ -0,0 +1,253 @@
+ import copy
+ import math
+ import numpy as np
+ import torch
+ from torch import nn
+ from torch.nn import functional as F
+ from lib.infer_pack import commons
+ from lib.infer_pack import modules
+ from lib.infer_pack.modules import LayerNorm
+
+ class Encoder(nn.Module):
+     def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0.0, window_size=10, **kwargs):
+         super().__init__()
+         self.hidden_channels = hidden_channels
+         self.filter_channels = filter_channels
+         self.n_heads = n_heads
+         self.n_layers = n_layers
+         self.kernel_size = kernel_size
+         self.p_dropout = p_dropout
+         self.window_size = window_size
+         self.drop = nn.Dropout(p_dropout)
+         self.attn_layers = nn.ModuleList()
+         self.norm_layers_1 = nn.ModuleList()
+         self.ffn_layers = nn.ModuleList()
+         self.norm_layers_2 = nn.ModuleList()
+         for _ in range(self.n_layers):
+             self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size))
+             self.norm_layers_1.append(LayerNorm(hidden_channels))
+             self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout))
+             self.norm_layers_2.append(LayerNorm(hidden_channels))
+
+     def forward(self, x, x_mask):
+         attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
+         x = x * x_mask
+         for i in range(self.n_layers):
+             y = self.attn_layers[i](x, x, attn_mask)
+             y = self.drop(y)
+             x = self.norm_layers_1[i](x + y)
+             y = self.ffn_layers[i](x, x_mask)
+             y = self.drop(y)
+             x = self.norm_layers_2[i](x + y)
+         x = x * x_mask
+         return x
+
+ class Decoder(nn.Module):
+     def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0.0, proximal_bias=False, proximal_init=True, **kwargs):
+         super().__init__()
+         self.hidden_channels = hidden_channels
+         self.filter_channels = filter_channels
+         self.n_heads = n_heads
+         self.n_layers = n_layers
+         self.kernel_size = kernel_size
+         self.p_dropout = p_dropout
+         self.proximal_bias = proximal_bias
+         self.proximal_init = proximal_init
+         self.drop = nn.Dropout(p_dropout)
+         self.self_attn_layers = nn.ModuleList()
+         self.norm_layers_0 = nn.ModuleList()
+         self.encdec_attn_layers = nn.ModuleList()
+         self.norm_layers_1 = nn.ModuleList()
+         self.ffn_layers = nn.ModuleList()
+         self.norm_layers_2 = nn.ModuleList()
+         for _ in range(self.n_layers):
+             self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init))
+             self.norm_layers_0.append(LayerNorm(hidden_channels))
+             self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout))
+             self.norm_layers_1.append(LayerNorm(hidden_channels))
+             self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True))
+             self.norm_layers_2.append(LayerNorm(hidden_channels))
+
+     def forward(self, x, x_mask, h, h_mask):
+         self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype)
+         encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
+         x = x * x_mask
+         for i in range(self.n_layers):
+             y = self.self_attn_layers[i](x, x, self_attn_mask)
+             y = self.drop(y)
+             x = self.norm_layers_0[i](x + y)
+
+             y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
+             y = self.drop(y)
+             x = self.norm_layers_1[i](x + y)
+
+             y = self.ffn_layers[i](x, x_mask)
+             y = self.drop(y)
+             x = self.norm_layers_2[i](x + y)
+         x = x * x_mask
+         return x
+
+
+ class MultiHeadAttention(nn.Module):
+     def __init__(self, channels, out_channels, n_heads, p_dropout=0.0, window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False):
+         super().__init__()
+         assert channels % n_heads == 0
+
+         self.channels = channels
+         self.out_channels = out_channels
+         self.n_heads = n_heads
+         self.p_dropout = p_dropout
+         self.window_size = window_size
+         self.heads_share = heads_share
+         self.block_length = block_length
+         self.proximal_bias = proximal_bias
+         self.proximal_init = proximal_init
+         self.attn = None
+
+         self.k_channels = channels // n_heads
+         self.conv_q = nn.Conv1d(channels, channels, 1)
+         self.conv_k = nn.Conv1d(channels, channels, 1)
+         self.conv_v = nn.Conv1d(channels, channels, 1)
+         self.conv_o = nn.Conv1d(channels, out_channels, 1)
+         self.drop = nn.Dropout(p_dropout)
+
+         if window_size is not None:
+             n_heads_rel = 1 if heads_share else n_heads
+             rel_stddev = self.k_channels**-0.5
+             self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
+             self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
+
+         nn.init.xavier_uniform_(self.conv_q.weight)
+         nn.init.xavier_uniform_(self.conv_k.weight)
+         nn.init.xavier_uniform_(self.conv_v.weight)
+         if proximal_init:
+             with torch.no_grad():
+                 self.conv_k.weight.copy_(self.conv_q.weight)
+                 self.conv_k.bias.copy_(self.conv_q.bias)
+
+     def forward(self, x, c, attn_mask=None):
+         q = self.conv_q(x)
+         k = self.conv_k(c)
+         v = self.conv_v(c)
+
+         x, self.attn = self.attention(q, k, v, mask=attn_mask)
+
+         x = self.conv_o(x)
+         return x
+
+     def attention(self, query, key, value, mask=None):
+         b, d, t_s, t_t = (*key.size(), query.size(2))
+         query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
+         key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
+         value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
+
+         scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
+         if self.window_size is not None:
+             assert t_s == t_t, "Relative attention is only available for self-attention."
+             key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
+             rel_logits = self._matmul_with_relative_keys(query / math.sqrt(self.k_channels), key_relative_embeddings)
+             scores_local = self._relative_position_to_absolute_position(rel_logits)
+             scores = scores + scores_local
+         if self.proximal_bias:
+             assert t_s == t_t, "Proximal bias is only available for self-attention."
+             scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype)
+         if mask is not None:
+             scores = scores.masked_fill(mask == 0, -1e4)
+             if self.block_length is not None:
+                 assert t_s == t_t, "Local attention is only available for self-attention."
+                 block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length)
+                 scores = scores.masked_fill(block_mask == 0, -1e4)
+         p_attn = F.softmax(scores, dim=-1)
+         p_attn = self.drop(p_attn)
+         output = torch.matmul(p_attn, value)
+         if self.window_size is not None:
+             relative_weights = self._absolute_position_to_relative_position(p_attn)
+             value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s)
+             output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings)
+         output = output.transpose(2, 3).contiguous().view(b, d, t_t)
+         return output, p_attn
+
+     def _matmul_with_relative_values(self, x, y): return torch.matmul(x, y.unsqueeze(0))
+     def _matmul_with_relative_keys(self, x, y): return torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
+
+     def _get_relative_embeddings(self, relative_embeddings, length):
+         max_relative_position = 2 * self.window_size + 1
+         pad_length = max(length - (self.window_size + 1), 0)
+         slice_start_position = max((self.window_size + 1) - length, 0)
+         slice_end_position = slice_start_position + 2 * length - 1
+         if pad_length > 0:
+             padded_relative_embeddings = F.pad(relative_embeddings, commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]))
+         else:
+             padded_relative_embeddings = relative_embeddings
+         return padded_relative_embeddings[:, slice_start_position:slice_end_position]
+
+     def _relative_position_to_absolute_position(self, x):
+         batch, heads, length, _ = x.size()
+         x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]]))
+         x_flat = x.view([batch, heads, length * 2 * length])
+         x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]]))
+
+         return x_flat.view([batch, heads, length + 1, 2 * length - 1])[:, :, :length, length - 1:]
+
+     def _absolute_position_to_relative_position(self, x):
+         batch, heads, length, _ = x.size()
+         x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]]))
+         x_flat = x.view([batch, heads, length**2 + length * (length - 1)])
+         x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
+         return x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:]
+
+     def _attention_bias_proximal(self, length):
+         r = torch.arange(length, dtype=torch.float32)
+         diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
+         return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
+
+ class FFN(nn.Module):
+     def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0.0, activation=None, causal=False):
+         super().__init__()
+         self.in_channels = in_channels
+         self.out_channels = out_channels
+         self.filter_channels = filter_channels
+         self.kernel_size = kernel_size
+         self.p_dropout = p_dropout
+         self.activation = activation
+         self.causal = causal
+         self.padding = self._causal_padding if causal else self._same_padding
+         self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
+         self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
+         self.drop = nn.Dropout(p_dropout)
+
+     def forward(self, x, x_mask):
+         x = self.conv_1(self.padding(x * x_mask))
+         if self.activation == "gelu": x = x * torch.sigmoid(1.702 * x)
+         else: x = torch.relu(x)
+         x = self.drop(x)
+         x = self.conv_2(self.padding(x * x_mask))
+         return x * x_mask
+
+     def _causal_padding(self, x):
+         if self.kernel_size == 1: return x
+         pad_l = self.kernel_size - 1
+         pad_r = 0
+         return self._pad(pad_l, pad_r, x)
+
+     def _same_padding(self, x):
+         if self.kernel_size == 1: return x
+         pad_l = (self.kernel_size - 1) // 2
+         pad_r = self.kernel_size // 2
+         return self._pad(pad_l, pad_r, x)
+
+     def _pad(self, pad_l, pad_r, x):
+         padding = [[0, 0], [0, 0], [pad_l, pad_r]]
+         x = F.pad(x, commons.convert_pad_shape(padding))
+         return x
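
These modules are shape-preserving over (batch, channels, frames) tensors. A small smoke test of the relative-attention Encoder; the channel/head counts below are illustrative only, not values taken from this repo's checkpoints:

    import torch
    from lib.infer_pack.attentions import Encoder

    enc = Encoder(hidden_channels=192, filter_channels=768, n_heads=2, n_layers=6, kernel_size=3, p_dropout=0.1)
    x = torch.randn(1, 192, 50)    # (batch, hidden_channels, frames)
    x_mask = torch.ones(1, 1, 50)  # 1 = valid frame
    y = enc(x, x_mask)             # same shape as x: torch.Size([1, 192, 50])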
lib/infer_pack/commons.py ADDED
@@ -0,0 +1,112 @@
+ import math
+ import numpy as np
+ import torch
+ from torch import nn
+ from torch.nn import functional as F
+
+ def init_weights(m, mean=0.0, std=0.01):
+     classname = m.__class__.__name__
+     if "Conv" in classname: m.weight.data.normal_(mean, std)
+
+ def get_padding(kernel_size, dilation=1):
+     return (kernel_size * dilation - dilation) // 2
+
+ def convert_pad_shape(pad_shape):
+     l = pad_shape[::-1]
+     return [item for sublist in l for item in sublist]
+
+ def kl_divergence(m_p, logs_p, m_q, logs_q):
+     kl = logs_q - logs_p - 0.5
+     kl += 0.5 * (torch.exp(2.0 * logs_p) + (m_p - m_q) ** 2) * torch.exp(-2.0 * logs_q)
+     return kl
+
+ def rand_gumbel(shape):
+     return -torch.log(-torch.log(torch.rand(shape) + 1e-5))
+
+ def rand_gumbel_like(x):
+     return rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device)
+
+ def slice_segments(x, ids_str, segment_size=4, slice_dim=2):
+     if slice_dim == 1: ret = torch.zeros_like(x[:, :segment_size])
+     else: ret = torch.zeros_like(x[:, :, :segment_size])
+
+     for i in range(x.size(0)):
+         idx_str = ids_str[i]
+         idx_end = idx_str + segment_size
+         ret[i] = x[i, ..., idx_str:idx_end]
+     return ret
+
+ def rand_slice_segments(x, x_lengths=None, segment_size=4):
+     b, d, t = x.size()
+
+     if x_lengths is None: x_lengths = t
+
+     ids_str_max = x_lengths - segment_size + 1
+     ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
+     ret = slice_segments(x, ids_str, segment_size)
+     return ret, ids_str
+
+ def get_timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4):
+     position = torch.arange(length, dtype=torch.float)
+     num_timescales = channels // 2
+     log_timescale_increment = math.log(float(max_timescale) / float(min_timescale)) / (num_timescales - 1)
+     inv_timescales = min_timescale * torch.exp(torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment)
+     scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1)
+     signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0)
+     signal = F.pad(signal, [0, 0, 0, channels % 2])
+     signal = signal.view(1, channels, length)
+     return signal
+
+ def apply_timing_signal_1d(x, operation='add', min_timescale=1.0, max_timescale=1.0e4):
+     b, channels, length = x.size()
+     signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
+     signal = signal.to(dtype=x.dtype, device=x.device)
+
+     if operation == 'add': return x + signal
+     elif operation == 'cat': return torch.cat([x, signal], dim=1)
+
+ def subsequent_mask(length):
+     return torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0)
+
+ @torch.jit.script
+ def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
+     n_channels_int = n_channels[0]
+     in_act = input_a + input_b
+     t_act = torch.tanh(in_act[:, :n_channels_int, :])
+     s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
+     return t_act * s_act
+
+ def shift_1d(x):
+     x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1]
+     return x
+
+ def sequence_mask(length, max_length=None):
+     if max_length is None: max_length = length.max()
+     x = torch.arange(max_length, dtype=length.dtype, device=length.device)
+     return x.unsqueeze(0) < length.unsqueeze(1)
+
+ def generate_path(duration, mask):
+     device = duration.device
+     b, _, t_y, t_x = mask.shape
+     cum_duration = torch.cumsum(duration, -1)
+     cum_duration_flat = cum_duration.view(b * t_x)
+     path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype)
+     path = path.view(b, t_x, t_y)
+     path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1]
+     path = path.unsqueeze(1).transpose(2, 3) * mask
+     return path
+
+ def clip_grad_value_(parameters, clip_value, norm_type=2):
+     if isinstance(parameters, torch.Tensor): parameters = [parameters]
+     parameters = list(filter(lambda p: p.grad is not None, parameters))
+     norm_type = float(norm_type)
+
+     if clip_value is not None: clip_value = float(clip_value)
+
+     total_norm = 0
+     for p in parameters:
+         param_norm = p.grad.data.norm(norm_type)
+         total_norm += param_norm.item() ** norm_type
+         if clip_value is not None: p.grad.data.clamp_(min=-clip_value, max=clip_value)
+     total_norm = total_norm ** (1.0 / norm_type)
+     return total_norm
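
Two of these helpers are used throughout attentions.py and models.py; a short worked example of what they compute:

    import torch
    from lib.infer_pack import commons

    # convert_pad_shape reverses the per-dimension pad list and flattens it
    # into the last-dim-first layout torch.nn.functional.pad expects.
    assert commons.convert_pad_shape([[0, 0], [0, 0], [1, 0]]) == [1, 0, 0, 0, 0, 0]

    # sequence_mask turns per-item lengths into a boolean validity mask.
    print(commons.sequence_mask(torch.tensor([2, 3]), max_length=4))
    # tensor([[ True,  True, False, False],
    #         [ True,  True,  True, False]])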
lib/infer_pack/models.py ADDED
@@ -0,0 +1,711 @@
+ import math
+ import torch
+ from torch import nn
+ from torch.nn import functional as F
+ from lib.infer_pack import modules
+ from lib.infer_pack import attentions
+ from lib.infer_pack import commons
+ from lib.infer_pack.commons import init_weights, get_padding
+ from torch.nn import Conv1d, ConvTranspose1d, Conv2d
+ from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
+ import numpy as np
+
+ class TextEncoder256(nn.Module):
+     def __init__(self, out_channels, hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout, f0=True):
+         super().__init__()
+         self.out_channels = out_channels
+         self.hidden_channels = hidden_channels
+         self.filter_channels = filter_channels
+         self.n_heads = n_heads
+         self.n_layers = n_layers
+         self.kernel_size = kernel_size
+         self.p_dropout = p_dropout
+         self.emb_phone = nn.Linear(256, hidden_channels)
+         self.lrelu = nn.LeakyReLU(0.1, inplace=True)
+         if f0: self.emb_pitch = nn.Embedding(256, hidden_channels)
+         self.encoder = attentions.Encoder(hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout)
+         self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
+
+     def forward(self, phone, pitch, lengths):
+         if pitch is None: x = self.emb_phone(phone)
+         else: x = self.emb_phone(phone) + self.emb_pitch(pitch)
+         x = x * math.sqrt(self.hidden_channels)
+         x = self.lrelu(x)
+         x = torch.transpose(x, 1, -1)
+         x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(x.dtype)
+         x = self.encoder(x * x_mask, x_mask)
+         stats = self.proj(x) * x_mask
+         m, logs = torch.split(stats, self.out_channels, dim=1)
+         return m, logs, x_mask
+
+ class TextEncoder768(nn.Module):
+     def __init__(self, out_channels, hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout, f0=True):
+         super().__init__()
+         self.out_channels = out_channels
+         self.hidden_channels = hidden_channels
+         self.filter_channels = filter_channels
+         self.n_heads = n_heads
+         self.n_layers = n_layers
+         self.kernel_size = kernel_size
+         self.p_dropout = p_dropout
+         self.emb_phone = nn.Linear(768, hidden_channels)
+         self.lrelu = nn.LeakyReLU(0.1, inplace=True)
+         if f0: self.emb_pitch = nn.Embedding(256, hidden_channels)
+         self.encoder = attentions.Encoder(hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout)
+         self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
+
+     def forward(self, phone, pitch, lengths):
+         if pitch is None: x = self.emb_phone(phone)
+         else: x = self.emb_phone(phone) + self.emb_pitch(pitch)
+         x = x * math.sqrt(self.hidden_channels)
+         x = self.lrelu(x)
+         x = torch.transpose(x, 1, -1)
+         x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(x.dtype)
+         x = self.encoder(x * x_mask, x_mask)
+         stats = self.proj(x) * x_mask
+         m, logs = torch.split(stats, self.out_channels, dim=1)
+         return m, logs, x_mask
+
+ class ResidualCouplingBlock(nn.Module):
+     def __init__(self, channels, hidden_channels, kernel_size, dilation_rate, n_layers, n_flows=4, gin_channels=0):
+         super().__init__()
+         self.channels = channels
+         self.hidden_channels = hidden_channels
+         self.kernel_size = kernel_size
+         self.dilation_rate = dilation_rate
+         self.n_layers = n_layers
+         self.n_flows = n_flows
+         self.gin_channels = gin_channels
+         self.flows = nn.ModuleList()
+         for _ in range(n_flows):
+             self.flows.append(modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True))
+             self.flows.append(modules.Flip())
+
+     def forward(self, x, x_mask, g=None, reverse=False):
+         if not reverse:
+             for flow in self.flows: x, _ = flow(x, x_mask, g=g, reverse=reverse)
+         else:
+             for flow in reversed(self.flows): x = flow(x, x_mask, g=g, reverse=reverse)
+         return x
+
+     def remove_weight_norm(self):
+         for i in range(self.n_flows): self.flows[i * 2].remove_weight_norm()
+
+ class PosteriorEncoder(nn.Module):
+     def __init__(
+         self,
+         in_channels,
+         out_channels,
+         hidden_channels,
+         kernel_size,
+         dilation_rate,
+         n_layers,
+         gin_channels=0,
+     ):
+         super().__init__()
+         self.in_channels = in_channels
+         self.out_channels = out_channels
+         self.hidden_channels = hidden_channels
+         self.kernel_size = kernel_size
+         self.dilation_rate = dilation_rate
+         self.n_layers = n_layers
+         self.gin_channels = gin_channels
+
+         self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
+         self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels)
+         self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
+
+     def forward(self, x, x_lengths, g=None):
+         x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
+         x = self.pre(x) * x_mask
+         x = self.enc(x, x_mask, g=g)
+         stats = self.proj(x) * x_mask
+         m, logs = torch.split(stats, self.out_channels, dim=1)
+         z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
+         return z, m, logs, x_mask
+
+     def remove_weight_norm(self):
+         self.enc.remove_weight_norm()
+
+ class Generator(torch.nn.Module):
+     def __init__(
+         self,
+         initial_channel,
+         resblock,
+         resblock_kernel_sizes,
+         resblock_dilation_sizes,
+         upsample_rates,
+         upsample_initial_channel,
+         upsample_kernel_sizes,
+         gin_channels=0,
+     ):
+         super(Generator, self).__init__()
+         self.num_kernels = len(resblock_kernel_sizes)
+         self.num_upsamples = len(upsample_rates)
+         self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3)
+         resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
+
+         self.ups = nn.ModuleList()
+         for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): self.ups.append(weight_norm(ConvTranspose1d(upsample_initial_channel // (2**i), upsample_initial_channel // (2 ** (i + 1)), k, u, padding=(k - u) // 2)))
+
+         self.resblocks = nn.ModuleList()
+         for i in range(len(self.ups)):
+             ch = upsample_initial_channel // (2 ** (i + 1))
+             for k, d in zip(resblock_kernel_sizes, resblock_dilation_sizes): self.resblocks.append(resblock(ch, k, d))
+
+         self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
+         self.ups.apply(init_weights)
+
+         if gin_channels != 0: self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
+
+     def forward(self, x, g=None):
+         x = self.conv_pre(x)
+         if g is not None: x = x + self.cond(g)
+
+         for i in range(self.num_upsamples):
+             x = F.leaky_relu(x, modules.LRELU_SLOPE)
+             x = self.ups[i](x)
+             xs = None
+             for j in range(self.num_kernels):
+                 if xs is None: xs = self.resblocks[i * self.num_kernels + j](x)
+                 else: xs += self.resblocks[i * self.num_kernels + j](x)
+             x = xs / self.num_kernels
+         x = F.leaky_relu(x)
+         x = self.conv_post(x)
+         x = torch.tanh(x)
+         return x
+
+     def remove_weight_norm(self):
+         for l in self.ups: remove_weight_norm(l)
+         for l in self.resblocks: l.remove_weight_norm()
+
+ class SineGen(torch.nn.Module):
+     def __init__(self, samp_rate, harmonic_num=0, sine_amp=0.1, noise_std=0.003, voiced_threshold=0):
+         super(SineGen, self).__init__()
+         self.sine_amp = sine_amp
+         self.noise_std = noise_std
+         self.harmonic_num = harmonic_num
+         self.dim = self.harmonic_num + 1
+         self.sampling_rate = samp_rate
+         self.voiced_threshold = voiced_threshold
+
+     def _f02uv(self, f0):
+         uv = torch.ones_like(f0)
+         uv = uv * (f0 > self.voiced_threshold)
+         return uv
+
+     def forward(self, f0, upp):
+         with torch.no_grad():
+             f0 = f0[:, None].transpose(1, 2)
+             f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device)
+             f0_buf[:, :, 0] = f0[:, :, 0]
+             for idx in np.arange(self.harmonic_num): f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * (idx + 2)
+             rad_values = (f0_buf / self.sampling_rate) % 1
+             rand_ini = torch.rand(f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device)
+             rand_ini[:, 0] = 0
+             rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
+             tmp_over_one = torch.cumsum(rad_values, 1)
+             tmp_over_one *= upp
+             tmp_over_one = F.interpolate(tmp_over_one.transpose(2, 1), scale_factor=upp, mode="linear", align_corners=True).transpose(2, 1)
+             rad_values = F.interpolate(rad_values.transpose(2, 1), scale_factor=upp, mode="nearest").transpose(2, 1)
+             tmp_over_one %= 1
+             tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0
+             cumsum_shift = torch.zeros_like(rad_values)
+             cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0
+             sine_waves = torch.sin(torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi)
+             sine_waves = sine_waves * self.sine_amp
+             uv = self._f02uv(f0)
+             uv = F.interpolate(uv.transpose(2, 1), scale_factor=upp, mode="nearest").transpose(2, 1)
+             noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3
+             noise = noise_amp * torch.randn_like(sine_waves)
+             sine_waves = sine_waves * uv + noise
+         return sine_waves, uv, noise
+
+
+ class SourceModuleHnNSF(torch.nn.Module):
+     def __init__(
+         self,
+         sampling_rate,
+         harmonic_num=0,
+         sine_amp=0.1,
+         add_noise_std=0.003,
+         voiced_threshod=0,
+         is_half=True,
+     ):
+         super(SourceModuleHnNSF, self).__init__()
+
+         self.sine_amp = sine_amp
+         self.noise_std = add_noise_std
+         self.is_half = is_half
+         self.l_sin_gen = SineGen(sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod)
+         self.l_linear = torch.nn.Linear(harmonic_num + 1, 1)
+         self.l_tanh = torch.nn.Tanh()
+
+     def forward(self, x, upp=None):
+         sine_wavs, uv, _ = self.l_sin_gen(x, upp)
+         if self.is_half: sine_wavs = sine_wavs.half()
+         sine_merge = self.l_tanh(self.l_linear(sine_wavs))
+         return sine_merge, None, None
+
+
+ class GeneratorNSF(torch.nn.Module):
+     def __init__(
+         self,
+         initial_channel,
+         resblock,
+         resblock_kernel_sizes,
+         resblock_dilation_sizes,
+         upsample_rates,
+         upsample_initial_channel,
+         upsample_kernel_sizes,
+         gin_channels,
+         sr,
+         is_half=False,
+     ):
+         super(GeneratorNSF, self).__init__()
+         self.num_kernels = len(resblock_kernel_sizes)
+         self.num_upsamples = len(upsample_rates)
+         self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates))
+         self.m_source = SourceModuleHnNSF(sampling_rate=sr, harmonic_num=0, is_half=is_half)
+         self.noise_convs = nn.ModuleList()
+         self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3)
+         resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
+
+         self.ups = nn.ModuleList()
+         for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
+             c_cur = upsample_initial_channel // (2 ** (i + 1))
+             self.ups.append(weight_norm(ConvTranspose1d(upsample_initial_channel // (2**i), upsample_initial_channel // (2 ** (i + 1)), k, u, padding=(k - u) // 2)))
+             if i + 1 < len(upsample_rates):
+                 stride_f0 = np.prod(upsample_rates[i + 1 :])
+                 self.noise_convs.append(Conv1d(1, c_cur, kernel_size=stride_f0 * 2, stride=stride_f0, padding=stride_f0 // 2))
+             else: self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1))
+
+         self.resblocks = nn.ModuleList()
+         for i in range(len(self.ups)):
+             ch = upsample_initial_channel // (2 ** (i + 1))
+             for k, d in zip(resblock_kernel_sizes, resblock_dilation_sizes): self.resblocks.append(resblock(ch, k, d))
+
+         self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
+         self.ups.apply(init_weights)
+
+         if gin_channels != 0: self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
+
+         self.upp = np.prod(upsample_rates)
+
+     def forward(self, x, f0, g=None):
+         har_source, noi_source, uv = self.m_source(f0, self.upp)
+         har_source = har_source.transpose(1, 2)
+         x = self.conv_pre(x)
+         if g is not None: x = x + self.cond(g)
+
+         for i in range(self.num_upsamples):
+             x = F.leaky_relu(x, modules.LRELU_SLOPE)
+             x = self.ups[i](x)
+             x_source = self.noise_convs[i](har_source)
+             x = x + x_source
+             xs = None
+             for j in range(self.num_kernels):
+                 if xs is None: xs = self.resblocks[i * self.num_kernels + j](x)
+                 else: xs += self.resblocks[i * self.num_kernels + j](x)
+             x = xs / self.num_kernels
+         x = F.leaky_relu(x)
+         x = self.conv_post(x)
+         x = torch.tanh(x)
+         return x
+
+     def remove_weight_norm(self):
+         for l in self.ups: remove_weight_norm(l)
+         for l in self.resblocks: l.remove_weight_norm()
+
+ sr2sr = {
+     "32k": 32000,
+     "40k": 40000,
+     "48k": 48000,
+ }
+
+ class SynthesizerTrnMs256NSFsid(nn.Module):
+     def __init__(
+         self,
+         spec_channels,
+         segment_size,
+         inter_channels,
+         hidden_channels,
+         filter_channels,
+         n_heads,
+         n_layers,
+         kernel_size,
+         p_dropout,
+         resblock,
+         resblock_kernel_sizes,
+         resblock_dilation_sizes,
+         upsample_rates,
+         upsample_initial_channel,
+         upsample_kernel_sizes,
+         spk_embed_dim,
+         gin_channels,
+         sr,
+         **kwargs
+     ):
+         super().__init__()
+         if isinstance(sr, str): sr = sr2sr[sr]
+         self.spec_channels = spec_channels
+         self.inter_channels = inter_channels
+         self.hidden_channels = hidden_channels
+         self.filter_channels = filter_channels
+         self.n_heads = n_heads
+         self.n_layers = n_layers
+         self.kernel_size = kernel_size
+         self.p_dropout = p_dropout
+         self.resblock = resblock
+         self.resblock_kernel_sizes = resblock_kernel_sizes
+         self.resblock_dilation_sizes = resblock_dilation_sizes
+         self.upsample_rates = upsample_rates
+         self.upsample_initial_channel = upsample_initial_channel
+         self.upsample_kernel_sizes = upsample_kernel_sizes
+         self.segment_size = segment_size
+         self.gin_channels = gin_channels
+         self.spk_embed_dim = spk_embed_dim
+         self.enc_p = TextEncoder256(inter_channels, hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout)
+         self.dec = GeneratorNSF(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels, sr=sr, is_half=kwargs["is_half"])
+         self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels)
+         self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels)
+         self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
+
+     def remove_weight_norm(self):
+         self.dec.remove_weight_norm()
+         self.flow.remove_weight_norm()
+         self.enc_q.remove_weight_norm()
+
+     def forward(self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds):
+         g = self.emb_g(ds).unsqueeze(-1)
+         m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
+         z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
+         z_p = self.flow(z, y_mask, g=g)
+         z_slice, ids_slice = commons.rand_slice_segments(z, y_lengths, self.segment_size)
+         pitchf = commons.slice_segments(pitchf, ids_slice, self.segment_size, slice_dim=1)
+         o = self.dec(z_slice, pitchf, g=g)
+         return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
+
+     def infer(self, phone, phone_lengths, pitch, nsff0, sid, max_len=None):
+         g = self.emb_g(sid).unsqueeze(-1)
+         m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
+         z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
+         z = self.flow(z_p, x_mask, g=g, reverse=True)
+         o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g)
+         return o, x_mask, (z, z_p, m_p, logs_p)
+
+
+ class SynthesizerTrnMs768NSFsid(nn.Module):
+     def __init__(
+         self,
+         spec_channels,
+         segment_size,
+         inter_channels,
+         hidden_channels,
+         filter_channels,
+         n_heads,
+         n_layers,
+         kernel_size,
+         p_dropout,
+         resblock,
+         resblock_kernel_sizes,
+         resblock_dilation_sizes,
+         upsample_rates,
+         upsample_initial_channel,
+         upsample_kernel_sizes,
+         spk_embed_dim,
+         gin_channels,
+         sr,
+         **kwargs
+     ):
+         super().__init__()
+         if isinstance(sr, str): sr = sr2sr[sr]
+         self.spec_channels = spec_channels
+         self.inter_channels = inter_channels
+         self.hidden_channels = hidden_channels
+         self.filter_channels = filter_channels
+         self.n_heads = n_heads
+         self.n_layers = n_layers
+         self.kernel_size = kernel_size
+         self.p_dropout = p_dropout
+         self.resblock = resblock
+         self.resblock_kernel_sizes = resblock_kernel_sizes
+         self.resblock_dilation_sizes = resblock_dilation_sizes
+         self.upsample_rates = upsample_rates
+         self.upsample_initial_channel = upsample_initial_channel
+         self.upsample_kernel_sizes = upsample_kernel_sizes
+         self.segment_size = segment_size
+         self.gin_channels = gin_channels
+         self.spk_embed_dim = spk_embed_dim
+         self.enc_p = TextEncoder768(inter_channels, hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout)
+         self.dec = GeneratorNSF(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels, sr=sr, is_half=kwargs["is_half"])
+         self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels)
+         self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels)
+         self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
+
+     def remove_weight_norm(self):
+         self.dec.remove_weight_norm()
+         self.flow.remove_weight_norm()
+         self.enc_q.remove_weight_norm()
+
+     def forward(self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds):
+         g = self.emb_g(ds).unsqueeze(-1)
+         m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
+         z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
+         z_p = self.flow(z, y_mask, g=g)
+         z_slice, ids_slice = commons.rand_slice_segments(z, y_lengths, self.segment_size)
+         pitchf = commons.slice_segments(pitchf, ids_slice, self.segment_size, slice_dim=1)
+         o = self.dec(z_slice, pitchf, g=g)
+         return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
+
+     def infer(self, phone, phone_lengths, pitch, nsff0, sid, max_len=None):
+         g = self.emb_g(sid).unsqueeze(-1)
+         m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
+         z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
+         z = self.flow(z_p, x_mask, g=g, reverse=True)
+         o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g)
+         return o, x_mask, (z, z_p, m_p, logs_p)
+
+ class SynthesizerTrnMs256NSFsid_nono(nn.Module):
+     def __init__(
+         self,
+         spec_channels,
+         segment_size,
+         inter_channels,
+         hidden_channels,
+         filter_channels,
+         n_heads,
+         n_layers,
+         kernel_size,
+         p_dropout,
+         resblock,
+         resblock_kernel_sizes,
+         resblock_dilation_sizes,
+         upsample_rates,
+         upsample_initial_channel,
+         upsample_kernel_sizes,
+         spk_embed_dim,
+         gin_channels,
+         sr=None,
+         **kwargs
+     ):
+         super().__init__()
+         self.spec_channels = spec_channels
+         self.inter_channels = inter_channels
+         self.hidden_channels = hidden_channels
+         self.filter_channels = filter_channels
+         self.n_heads = n_heads
+         self.n_layers = n_layers
+         self.kernel_size = kernel_size
+         self.p_dropout = p_dropout
+         self.resblock = resblock
+         self.resblock_kernel_sizes = resblock_kernel_sizes
+         self.resblock_dilation_sizes = resblock_dilation_sizes
+         self.upsample_rates = upsample_rates
+         self.upsample_initial_channel = upsample_initial_channel
+         self.upsample_kernel_sizes = upsample_kernel_sizes
+         self.segment_size = segment_size
+         self.gin_channels = gin_channels
+         self.spk_embed_dim = spk_embed_dim
+         self.enc_p = TextEncoder256(inter_channels, hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout, f0=False)
+         self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels)
+         self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels)
+         self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels)
+         self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
+
+     def remove_weight_norm(self):
+         self.dec.remove_weight_norm()
+         self.flow.remove_weight_norm()
+         self.enc_q.remove_weight_norm()
+
+     def forward(self, phone, phone_lengths, y, y_lengths, ds):
+         g = self.emb_g(ds).unsqueeze(-1)
+         m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
+         z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
+         z_p = self.flow(z, y_mask, g=g)
+         z_slice, ids_slice = commons.rand_slice_segments(z, y_lengths, self.segment_size)
+         o = self.dec(z_slice, g=g)
+         return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
+
+     def infer(self, phone, phone_lengths, sid, max_len=None):
+         g = self.emb_g(sid).unsqueeze(-1)
+         m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
+         z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
+         z = self.flow(z_p, x_mask, g=g, reverse=True)
+         o = self.dec((z * x_mask)[:, :, :max_len], g=g)
+         return o, x_mask, (z, z_p, m_p, logs_p)
+
+ class SynthesizerTrnMs768NSFsid_nono(nn.Module):
+     def __init__(
+         self,
+         spec_channels,
+         segment_size,
+         inter_channels,
+         hidden_channels,
+         filter_channels,
+         n_heads,
+         n_layers,
+         kernel_size,
+         p_dropout,
+         resblock,
+         resblock_kernel_sizes,
+         resblock_dilation_sizes,
+         upsample_rates,
+         upsample_initial_channel,
558
+ upsample_kernel_sizes,
559
+ spk_embed_dim,
560
+ gin_channels,
561
+ sr=None,
562
+ **kwargs
563
+ ):
564
+ super().__init__()
565
+ self.spec_channels = spec_channels
566
+ self.inter_channels = inter_channels
567
+ self.hidden_channels = hidden_channels
568
+ self.filter_channels = filter_channels
569
+ self.n_heads = n_heads
570
+ self.n_layers = n_layers
571
+ self.kernel_size = kernel_size
572
+ self.p_dropout = p_dropout
573
+ self.resblock = resblock
574
+ self.resblock_kernel_sizes = resblock_kernel_sizes
575
+ self.resblock_dilation_sizes = resblock_dilation_sizes
576
+ self.upsample_rates = upsample_rates
577
+ self.upsample_initial_channel = upsample_initial_channel
578
+ self.upsample_kernel_sizes = upsample_kernel_sizes
579
+ self.segment_size = segment_size
580
+ self.gin_channels = gin_channels
581
+ self.spk_embed_dim = spk_embed_dim
582
+ self.enc_p = TextEncoder768(inter_channels, hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout, f0=False)
583
+ self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels)
584
+ self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels)
585
+ self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels)
586
+ self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
587
+
588
+ def remove_weight_norm(self):
589
+ self.dec.remove_weight_norm()
590
+ self.flow.remove_weight_norm()
591
+ self.enc_q.remove_weight_norm()
592
+
593
+ def forward(self, phone, phone_lengths, y, y_lengths, ds):
594
+ g = self.emb_g(ds).unsqueeze(-1)
595
+ m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
596
+ z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
597
+ z_p = self.flow(z, y_mask, g=g)
598
+ z_slice, ids_slice = commons.rand_slice_segments(z, y_lengths, self.segment_size)
599
+ o = self.dec(z_slice, g=g)
600
+ return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
601
+
602
+ def infer(self, phone, phone_lengths, sid, max_len=None):
603
+ g = self.emb_g(sid).unsqueeze(-1)
604
+ m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
605
+ z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
606
+ z = self.flow(z_p, x_mask, g=g, reverse=True)
607
+ o = self.dec((z * x_mask)[:, :, :max_len], g=g)
608
+ return o, x_mask, (z, z_p, m_p, logs_p)
609
+
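+ # HiFi-GAN-style multi-period discriminator: one scale discriminator plus a
+ # period discriminator per prime period, so periodic artifacts at different
+ # pitches are each judged by a dedicated sub-discriminator.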
610
+ class MultiPeriodDiscriminator(torch.nn.Module):
611
+ def __init__(self, use_spectral_norm=False):
612
+ super(MultiPeriodDiscriminator, self).__init__()
613
+ periods = [2, 3, 5, 7, 11, 17]
614
+ discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
615
+ discs += [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods]
616
+ self.discriminators = nn.ModuleList(discs)
617
+
618
+ def forward(self, y, y_hat):
619
+ y_d_rs = []
620
+ y_d_gs = []
621
+ fmap_rs = []
622
+ fmap_gs = []
623
+ for d in self.discriminators:
624
+ y_d_r, fmap_r = d(y)
625
+ y_d_g, fmap_g = d(y_hat)
626
+ y_d_rs.append(y_d_r)
627
+ y_d_gs.append(y_d_g)
628
+ fmap_rs.append(fmap_r)
629
+ fmap_gs.append(fmap_g)
630
+
631
+ return y_d_rs, y_d_gs, fmap_rs, fmap_gs
632
+
633
+ class MultiPeriodDiscriminatorV2(torch.nn.Module):
634
+ def __init__(self, use_spectral_norm=False):
635
+ super(MultiPeriodDiscriminatorV2, self).__init__()
636
+ periods = [2, 3, 5, 7, 11, 17, 23, 37]
637
+
638
+ discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
639
+ discs += [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods]
640
+ self.discriminators = nn.ModuleList(discs)
641
+
642
+ def forward(self, y, y_hat):
643
+ y_d_rs = []
644
+ y_d_gs = []
645
+ fmap_rs = []
646
+ fmap_gs = []
647
+ for d in self.discriminators:
648
+ y_d_r, fmap_r = d(y)
649
+ y_d_g, fmap_g = d(y_hat)
650
+ y_d_rs.append(y_d_r)
651
+ y_d_gs.append(y_d_g)
652
+ fmap_rs.append(fmap_r)
653
+ fmap_gs.append(fmap_g)
654
+
655
+ return y_d_rs, y_d_gs, fmap_rs, fmap_gs
656
+
657
+ class DiscriminatorS(torch.nn.Module):
658
+ def __init__(self, use_spectral_norm=False):
659
+ super(DiscriminatorS, self).__init__()
660
+ norm_f = spectral_norm if use_spectral_norm else weight_norm
661
+ self.convs = nn.ModuleList([norm_f(Conv1d(1, 16, 15, 1, padding=7)), norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), norm_f(Conv1d(1024, 1024, 5, 1, padding=2))])
662
+ self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
663
+
664
+ def forward(self, x):
665
+ fmap = []
666
+ for l in self.convs:
667
+ x = l(x)
668
+ x = F.leaky_relu(x, modules.LRELU_SLOPE)
669
+ fmap.append(x)
670
+ x = self.conv_post(x)
671
+ fmap.append(x)
672
+ x = torch.flatten(x, 1, -1)
673
+
674
+ return x, fmap
675
+
676
+
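+ # Period discriminator: folds the 1-D waveform into a (time/period, period)
+ # grid so the 2-D convolutions compare samples exactly one period apart.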
677
+ class DiscriminatorP(torch.nn.Module):
678
+ def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
679
+ super(DiscriminatorP, self).__init__()
680
+ self.period = period
681
+ self.use_spectral_norm = use_spectral_norm
682
+ norm_f = spectral_norm if use_spectral_norm else weight_norm
683
+ self.convs = nn.ModuleList(
684
+ [
685
+ norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
686
+ norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
687
+ norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
688
+ norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
689
+ norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))),
690
+ ]
691
+ )
692
+ self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
693
+
694
+ def forward(self, x):
695
+ fmap = []
696
+ b, c, t = x.shape
697
+ if t % self.period != 0:
698
+ n_pad = self.period - (t % self.period)
699
+ x = F.pad(x, (0, n_pad), "reflect")
700
+ t = t + n_pad
701
+ x = x.view(b, c, t // self.period, self.period)
702
+
703
+ for l in self.convs:
704
+ x = l(x)
705
+ x = F.leaky_relu(x, modules.LRELU_SLOPE)
706
+ fmap.append(x)
707
+ x = self.conv_post(x)
708
+ fmap.append(x)
709
+ x = torch.flatten(x, 1, -1)
710
+
711
+ return x, fmap
lib/infer_pack/models_onnx.py ADDED
@@ -0,0 +1,582 @@
1
+ import math
2
+ import torch
3
+ import numpy as np
4
+ from torch import nn
5
+ from torch.nn import functional as F
6
+ from torch.nn import Conv1d, ConvTranspose1d, Conv2d
7
+ from lib.infer_pack import modules, attentions, commons
8
+ from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
9
+ from lib.infer_pack.commons import init_weights, get_padding, sequence_mask
13
+
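+ # ONNX export variants of the modules in models.py; SynthesizerTrnMsNSFsidM
+ # below takes the sampling noise as an explicit input so the exported graph
+ # is deterministic given its inputs.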
14
+ class TextEncoder(nn.Module):
15
+ def __init__(
16
+ self,
17
+ input_dim,
18
+ out_channels,
19
+ hidden_channels,
20
+ filter_channels,
21
+ n_heads,
22
+ n_layers,
23
+ kernel_size,
24
+ p_dropout,
25
+ f0=True,
26
+ ):
27
+ super().__init__()
28
+ self.out_channels = out_channels
29
+ self.hidden_channels = hidden_channels
30
+ self.filter_channels = filter_channels
31
+ self.n_heads = n_heads
32
+ self.n_layers = n_layers
33
+ self.kernel_size = kernel_size
34
+ self.p_dropout = p_dropout
35
+ self.emb_phone = nn.Linear(input_dim, hidden_channels)
36
+ self.lrelu = nn.LeakyReLU(0.1, inplace=True)
37
+
38
+ if f0: self.emb_pitch = nn.Embedding(256, hidden_channels)
39
+ self.encoder = attentions.Encoder(hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout)
40
+ self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
41
+
42
+ def forward(self, phone, pitch, lengths):
43
+ x = self.emb_phone(phone) + self.emb_pitch(pitch) if pitch is not None else self.emb_phone(phone)
44
+ x *= math.sqrt(self.hidden_channels)
45
+ x = self.lrelu(x)
46
+ x = torch.transpose(x, 1, -1)
47
+ x_mask = torch.unsqueeze(sequence_mask(lengths, x.size(2)), 1).to(x.dtype)
48
+ x = self.encoder(x * x_mask, x_mask)
49
+ stats = self.proj(x) * x_mask
50
+ m, logs = torch.split(stats, self.out_channels, dim=1)
51
+ return m, logs, x_mask
52
+
53
+ class TextEncoder768(nn.Module):
54
+ def __init__(
55
+ self,
56
+ out_channels,
57
+ hidden_channels,
58
+ filter_channels,
59
+ n_heads,
60
+ n_layers,
61
+ kernel_size,
62
+ p_dropout,
63
+ f0=True,
64
+ ):
65
+ super().__init__()
66
+ self.out_channels = out_channels
67
+ self.hidden_channels = hidden_channels
68
+ self.filter_channels = filter_channels
69
+ self.n_heads = n_heads
70
+ self.n_layers = n_layers
71
+ self.kernel_size = kernel_size
72
+ self.p_dropout = p_dropout
73
+ self.emb_phone = nn.Linear(768, hidden_channels)
74
+ self.lrelu = nn.LeakyReLU(0.1, inplace=True)
75
+ if f0: self.emb_pitch = nn.Embedding(256, hidden_channels)
76
+ self.encoder = attentions.Encoder(hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout)
77
+ self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
78
+
79
+ def forward(self, phone, pitch, lengths):
80
+ if pitch is None: x = self.emb_phone(phone)
81
+ else: x = self.emb_phone(phone) + self.emb_pitch(pitch)
82
+ x = x * math.sqrt(self.hidden_channels)
83
+ x = self.lrelu(x)
84
+ x = torch.transpose(x, 1, -1)
85
+ x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(x.dtype)
86
+ x = self.encoder(x * x_mask, x_mask)
87
+ stats = self.proj(x) * x_mask
88
+
89
+ m, logs = torch.split(stats, self.out_channels, dim=1)
90
+ return m, logs, x_mask
91
+
92
+ class ResidualCouplingBlock(nn.Module):
93
+ def __init__(
94
+ self,
95
+ channels,
96
+ hidden_channels,
97
+ kernel_size,
98
+ dilation_rate,
99
+ n_layers,
100
+ n_flows=4,
101
+ gin_channels=0,
102
+ ):
103
+ super().__init__()
104
+ self.channels = channels
105
+ self.hidden_channels = hidden_channels
106
+ self.kernel_size = kernel_size
107
+ self.dilation_rate = dilation_rate
108
+ self.n_layers = n_layers
109
+ self.n_flows = n_flows
110
+ self.gin_channels = gin_channels
111
+
112
+ self.flows = nn.ModuleList()
113
+ for _ in range(n_flows):
114
+ self.flows.append(
115
+ modules.ResidualCouplingLayer(
116
+ channels,
117
+ hidden_channels,
118
+ kernel_size,
119
+ dilation_rate,
120
+ n_layers,
121
+ gin_channels=gin_channels,
122
+ mean_only=True,
123
+ )
124
+ )
125
+ self.flows.append(modules.Flip())
126
+
127
+ def forward(self, x, x_mask, g=None, reverse=False):
128
+ if not reverse:
129
+ for flow in self.flows: x, _ = flow(x, x_mask, g=g, reverse=reverse)
130
+ else:
131
+ for flow in reversed(self.flows): x = flow(x, x_mask, g=g, reverse=reverse)
132
+ return x
133
+
134
+ def remove_weight_norm(self):
135
+ for i in range(self.n_flows):
136
+ self.flows[i * 2].remove_weight_norm()
137
+
138
+ class PosteriorEncoder(nn.Module):
139
+ def __init__(
140
+ self,
141
+ in_channels,
142
+ out_channels,
143
+ hidden_channels,
144
+ kernel_size,
145
+ dilation_rate,
146
+ n_layers,
147
+ gin_channels=0,
148
+ ):
149
+ super().__init__()
150
+ self.in_channels = in_channels
151
+ self.out_channels = out_channels
152
+ self.hidden_channels = hidden_channels
153
+ self.kernel_size = kernel_size
154
+ self.dilation_rate = dilation_rate
155
+ self.n_layers = n_layers
156
+ self.gin_channels = gin_channels
157
+
158
+ self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
159
+ self.enc = modules.WN(
160
+ hidden_channels,
161
+ kernel_size,
162
+ dilation_rate,
163
+ n_layers,
164
+ gin_channels=gin_channels,
165
+ )
166
+ self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
167
+
168
+ def forward(self, x, x_lengths, g=None):
169
+ x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(
170
+ x.dtype
171
+ )
172
+ x = self.pre(x) * x_mask
173
+ x = self.enc(x, x_mask, g=g)
174
+ stats = self.proj(x) * x_mask
175
+ m, logs = torch.split(stats, self.out_channels, dim=1)
176
+ z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
177
+ return z, m, logs, x_mask
178
+
179
+ def remove_weight_norm(self):
180
+ self.enc.remove_weight_norm()
181
+
182
+ class Generator(torch.nn.Module):
183
+ def __init__(
184
+ self,
185
+ initial_channel,
186
+ resblock,
187
+ resblock_kernel_sizes,
188
+ resblock_dilation_sizes,
189
+ upsample_rates,
190
+ upsample_initial_channel,
191
+ upsample_kernel_sizes,
192
+ gin_channels=0,
193
+ ):
194
+ super(Generator, self).__init__()
195
+ self.num_kernels = len(resblock_kernel_sizes)
196
+ self.num_upsamples = len(upsample_rates)
197
+ self.conv_pre = Conv1d(
198
+ initial_channel, upsample_initial_channel, 7, 1, padding=3
199
+ )
200
+ resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
201
+
202
+ self.ups = nn.ModuleList()
203
+ for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
204
+ self.ups.append(
205
+ weight_norm(
206
+ ConvTranspose1d(
207
+ upsample_initial_channel // (2**i),
208
+ upsample_initial_channel // (2 ** (i + 1)),
209
+ k,
210
+ u,
211
+ padding=(k - u) // 2,
212
+ )
213
+ )
214
+ )
215
+
216
+ self.resblocks = nn.ModuleList()
217
+ for i in range(len(self.ups)):
218
+ ch = upsample_initial_channel // (2 ** (i + 1))
219
+ for k, d in zip(resblock_kernel_sizes, resblock_dilation_sizes): self.resblocks.append(resblock(ch, k, d))
220
+
221
+ self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
222
+ self.ups.apply(init_weights)
223
+
224
+ if gin_channels != 0: self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
225
+
226
+ def forward(self, x, g=None):
227
+ x = self.conv_pre(x)
228
+ if g is not None: x = x + self.cond(g)
229
+
230
+ for i in range(self.num_upsamples):
231
+ x = F.leaky_relu(x, modules.LRELU_SLOPE)
232
+ x = self.ups[i](x)
233
+ xs = None
234
+ for j in range(self.num_kernels):
235
+ if xs is None: xs = self.resblocks[i * self.num_kernels + j](x)
236
+ else: xs += self.resblocks[i * self.num_kernels + j](x)
237
+ x = xs / self.num_kernels
238
+ x = F.leaky_relu(x)
239
+ x = self.conv_post(x)
240
+ x = torch.tanh(x)
241
+
242
+ return x
243
+
244
+ def remove_weight_norm(self):
245
+ for l in self.ups: remove_weight_norm(l)
246
+ for l in self.resblocks: l.remove_weight_norm()
247
+
248
+ class SineGen(torch.nn.Module):
249
+ def __init__(
250
+ self,
251
+ samp_rate,
252
+ harmonic_num=0,
253
+ sine_amp=0.1,
254
+ noise_std=0.003,
255
+ voiced_threshold=0,
256
+ ):
257
+ super(SineGen, self).__init__()
258
+ self.sine_amp = sine_amp
259
+ self.noise_std = noise_std
260
+ self.harmonic_num = harmonic_num
261
+ self.dim = self.harmonic_num + 1
262
+ self.sampling_rate = samp_rate
263
+ self.voiced_threshold = voiced_threshold
264
+
265
+ def _f02uv(self, f0):
266
+ uv = torch.ones_like(f0)
267
+ uv = uv * (f0 > self.voiced_threshold)
268
+ return uv
269
+
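+ # Build harmonic phases by integrating instantaneous frequency (cumsum of
+ # f0 / sampling_rate) with a random initial phase per harmonic; unvoiced
+ # frames fall back to Gaussian noise.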
270
+ def forward(self, f0, upp):
271
+ with torch.no_grad():
272
+ f0 = f0[:, None].transpose(1, 2)
273
+ f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device)
274
+ f0_buf[:, :, 0] = f0[:, :, 0]
275
+ for idx in np.arange(self.harmonic_num): f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * (idx + 2)
276
+ rad_values = (f0_buf / self.sampling_rate) % 1
277
+ rand_ini = torch.rand(f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device)
278
+ rand_ini[:, 0] = 0
279
+ rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
280
+ tmp_over_one = torch.cumsum(rad_values, 1)
281
+ tmp_over_one *= upp
282
+ tmp_over_one = F.interpolate(tmp_over_one.transpose(2, 1), scale_factor=upp, mode="linear", align_corners=True).transpose(2, 1)
283
+ rad_values = F.interpolate(rad_values.transpose(2, 1), scale_factor=upp, mode="nearest").transpose(2, 1)
284
+ tmp_over_one %= 1
285
+ tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0
286
+ cumsum_shift = torch.zeros_like(rad_values)
287
+ cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0
288
+ sine_waves = torch.sin(torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi)
289
+ sine_waves = sine_waves * self.sine_amp
290
+ uv = self._f02uv(f0)
291
+ uv = F.interpolate(uv.transpose(2, 1), scale_factor=upp, mode="nearest").transpose(2, 1)
292
+ noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3
293
+ noise = noise_amp * torch.randn_like(sine_waves)
294
+ sine_waves = sine_waves * uv + noise
295
+ return sine_waves, uv, noise
296
+
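+ # NSF source module: SineGen produces the harmonic stack, which a linear
+ # layer followed by tanh collapses into a single excitation channel.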
297
+ class SourceModuleHnNSF(torch.nn.Module):
298
+ def __init__(
299
+ self,
300
+ sampling_rate,
301
+ harmonic_num=0,
302
+ sine_amp=0.1,
303
+ add_noise_std=0.003,
304
+ voiced_threshold=0,
305
+ is_half=True,
306
+ ):
307
+ super(SourceModuleHnNSF, self).__init__()
308
+
309
+ self.sine_amp = sine_amp
310
+ self.noise_std = add_noise_std
311
+ self.is_half = is_half
312
+ self.l_sin_gen = SineGen(sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshold)
313
+ self.l_linear = torch.nn.Linear(harmonic_num + 1, 1)
314
+ self.l_tanh = torch.nn.Tanh()
315
+
316
+ def forward(self, x, upp=None):
317
+ sine_wavs, uv, _ = self.l_sin_gen(x, upp)
318
+ if self.is_half: sine_wavs = sine_wavs.half()
319
+ sine_merge = self.l_tanh(self.l_linear(sine_wavs))
320
+ return sine_merge, None, None
321
+
322
+
323
+ class GeneratorNSF(torch.nn.Module):
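+ # NSF generator: a HiFi-GAN upsampling stack in which every stage adds a
+ # strided projection of the harmonic source, keeping the output pitch locked
+ # to the f0 contour.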
324
+ def __init__(
325
+ self,
326
+ initial_channel,
327
+ resblock,
328
+ resblock_kernel_sizes,
329
+ resblock_dilation_sizes,
330
+ upsample_rates,
331
+ upsample_initial_channel,
332
+ upsample_kernel_sizes,
333
+ gin_channels,
334
+ sr,
335
+ is_half=False,
336
+ ):
337
+ super(GeneratorNSF, self).__init__()
338
+ self.num_kernels = len(resblock_kernel_sizes)
339
+ self.num_upsamples = len(upsample_rates)
340
+ self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates))
341
+ self.m_source = SourceModuleHnNSF(sampling_rate=sr, harmonic_num=0, is_half=is_half)
342
+ self.noise_convs = nn.ModuleList()
343
+ self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3)
344
+ resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
345
+
346
+ self.ups = nn.ModuleList()
347
+ for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
348
+ c_cur = upsample_initial_channel // (2 ** (i + 1))
349
+ self.ups.append(weight_norm(ConvTranspose1d(upsample_initial_channel // (2**i), upsample_initial_channel // (2 ** (i + 1)), k, u, padding=(k - u) // 2,)))
350
+ if i + 1 < len(upsample_rates):
351
+ stride_f0 = np.prod(upsample_rates[i + 1 :])
352
+ self.noise_convs.append(Conv1d(1, c_cur, kernel_size=stride_f0 * 2, stride=stride_f0, padding=stride_f0 // 2,))
353
+ else: self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1))
354
+
355
+ self.resblocks = nn.ModuleList()
356
+ for i in range(len(self.ups)):
357
+ ch = upsample_initial_channel // (2 ** (i + 1))
358
+ for k, d in zip(resblock_kernel_sizes, resblock_dilation_sizes): self.resblocks.append(resblock(ch, k, d))
359
+
360
+ self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
361
+ self.ups.apply(init_weights)
362
+
363
+ if gin_channels != 0: self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
364
+
365
+ self.upp = np.prod(upsample_rates)
366
+
367
+ def forward(self, x, f0, g=None):
368
+ har_source, noi_source, uv = self.m_source(f0, self.upp)
369
+ har_source = har_source.transpose(1, 2)
370
+ x = self.conv_pre(x)
371
+ if g is not None: x = x + self.cond(g)
372
+
373
+ for i in range(self.num_upsamples):
374
+ x = F.leaky_relu(x, modules.LRELU_SLOPE)
375
+ x = self.ups[i](x)
376
+ x_source = self.noise_convs[i](har_source)
377
+ x = x + x_source
378
+ xs = None
379
+ for j in range(self.num_kernels):
380
+ if xs is None: xs = self.resblocks[i * self.num_kernels + j](x)
381
+ else: xs += self.resblocks[i * self.num_kernels + j](x)
382
+ x = xs / self.num_kernels
383
+ x = F.leaky_relu(x)
384
+ x = self.conv_post(x)
385
+ x = torch.tanh(x)
386
+ return x
387
+
388
+ def remove_weight_norm(self):
389
+ for l in self.ups:
390
+ remove_weight_norm(l)
391
+ for l in self.resblocks:
392
+ l.remove_weight_norm()
393
+
394
+ sr2sr = {"32k": 32000, "40k": 40000, "48k": 48000}
395
+
396
+ class SynthesizerTrnMsNSFsidM(nn.Module):
397
+ def __init__(
398
+ self,
399
+ spec_channels,
400
+ segment_size,
401
+ inter_channels,
402
+ hidden_channels,
403
+ filter_channels,
404
+ n_heads,
405
+ n_layers,
406
+ kernel_size,
407
+ p_dropout,
408
+ resblock,
409
+ resblock_kernel_sizes,
410
+ resblock_dilation_sizes,
411
+ upsample_rates,
412
+ upsample_initial_channel,
413
+ upsample_kernel_sizes,
414
+ spk_embed_dim,
415
+ gin_channels,
416
+ sr,
417
+ version,
418
+ **kwargs
419
+ ):
420
+ super().__init__()
421
+ if isinstance(sr, str): sr = sr2sr[sr]
422
+ self.spec_channels = spec_channels
423
+ self.inter_channels = inter_channels
424
+ self.hidden_channels = hidden_channels
425
+ self.filter_channels = filter_channels
426
+ self.n_heads = n_heads
427
+ self.n_layers = n_layers
428
+ self.kernel_size = kernel_size
429
+ self.p_dropout = p_dropout
430
+ self.resblock = resblock
431
+ self.resblock_kernel_sizes = resblock_kernel_sizes
432
+ self.resblock_dilation_sizes = resblock_dilation_sizes
433
+ self.upsample_rates = upsample_rates
434
+ self.upsample_initial_channel = upsample_initial_channel
435
+ self.upsample_kernel_sizes = upsample_kernel_sizes
436
+ self.segment_size = segment_size
437
+ self.gin_channels = gin_channels
438
+ self.spk_embed_dim = spk_embed_dim
439
+ if version == "v1": self.enc_p = TextEncoder(256, inter_channels, hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout)  # pass input_dim=256: v1 content features are 256-dim (the original call omitted this argument)
440
+ else: self.enc_p = TextEncoder768(inter_channels, hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout)
441
+ self.dec = GeneratorNSF(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels, sr=sr, is_half=kwargs["is_half"])
442
+ self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels)
443
+ self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels)
444
+ self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
445
+ self.speaker_map = None
446
+
447
+ def remove_weight_norm(self):
448
+ self.dec.remove_weight_norm()
449
+ self.flow.remove_weight_norm()
450
+ self.enc_q.remove_weight_norm()
451
+
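+ # Precompute every speaker embedding into a map so the exported graph can
+ # blend speakers with a weighted sum instead of an embedding lookup.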
452
+ def construct_spkmixmap(self, n_speaker):
453
+ self.speaker_map = torch.zeros((n_speaker, 1, 1, self.gin_channels))
454
+ for i in range(n_speaker): self.speaker_map[i] = self.emb_g(torch.LongTensor([[i]]))
455
+ self.speaker_map = self.speaker_map.unsqueeze(0)
456
+
457
+ def forward(self, phone, phone_lengths, pitch, nsff0, g, rnd, max_len=None):
458
+ if self.speaker_map is not None:
459
+ g = g.reshape((g.shape[0], g.shape[1], 1, 1, 1))
460
+ g = g * self.speaker_map
461
+ g = torch.sum(g, dim=1)
462
+ g = g.transpose(0, -1).transpose(0, -2).squeeze(0)
463
+ else:
464
+ g = g.unsqueeze(0)
465
+ g = self.emb_g(g).transpose(1, 2)
466
+
467
+ m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
468
+ z_p = (m_p + torch.exp(logs_p) * rnd) * x_mask
469
+ z = self.flow(z_p, x_mask, g=g, reverse=True)
470
+ return self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g)
471
+
472
+ class MultiPeriodDiscriminator(torch.nn.Module):
473
+ def __init__(self, use_spectral_norm=False):
474
+ super(MultiPeriodDiscriminator, self).__init__()
475
+ periods = [2, 3, 5, 7, 11, 17]
476
+
477
+ discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
478
+ discs += [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods]
479
+ self.discriminators = nn.ModuleList(discs)
480
+
481
+ def forward(self, y, y_hat):
482
+ y_d_rs = []
483
+ y_d_gs = []
484
+ fmap_rs = []
485
+ fmap_gs = []
486
+ for d in self.discriminators:
487
+ y_d_r, fmap_r = d(y)
488
+ y_d_g, fmap_g = d(y_hat)
489
+ y_d_rs.append(y_d_r)
490
+ y_d_gs.append(y_d_g)
491
+ fmap_rs.append(fmap_r)
492
+ fmap_gs.append(fmap_g)
493
+
494
+ return y_d_rs, y_d_gs, fmap_rs, fmap_gs
495
+
496
+ class MultiPeriodDiscriminatorV2(torch.nn.Module):
497
+ def __init__(self, use_spectral_norm=False):
498
+ super(MultiPeriodDiscriminatorV2, self).__init__()
499
+ periods = [2, 3, 5, 7, 11, 17, 23, 37]
500
+ discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
501
+ discs += [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods]
502
+ self.discriminators = nn.ModuleList(discs)
503
+
504
+ def forward(self, y, y_hat):
505
+ y_d_rs = []
506
+ y_d_gs = []
507
+ fmap_rs = []
508
+ fmap_gs = []
509
+ for d in self.discriminators:
510
+ y_d_r, fmap_r = d(y)
511
+ y_d_g, fmap_g = d(y_hat)
512
+ y_d_rs.append(y_d_r)
513
+ y_d_gs.append(y_d_g)
514
+ fmap_rs.append(fmap_r)
515
+ fmap_gs.append(fmap_g)
516
+
517
+ return y_d_rs, y_d_gs, fmap_rs, fmap_gs
518
+
519
+ class DiscriminatorS(torch.nn.Module):
520
+ def __init__(self, use_spectral_norm=False):
521
+ super(DiscriminatorS, self).__init__()
522
+ norm_f = spectral_norm if use_spectral_norm else weight_norm
523
+ self.convs = nn.ModuleList(
524
+ [
525
+ norm_f(Conv1d(1, 16, 15, 1, padding=7)),
526
+ norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
527
+ norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
528
+ norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
529
+ norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
530
+ norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
531
+ ]
532
+ )
533
+ self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
534
+
535
+ def forward(self, x):
536
+ fmap = []
537
+
538
+ for l in self.convs:
539
+ x = l(x)
540
+ x = F.leaky_relu(x, modules.LRELU_SLOPE)
541
+ fmap.append(x)
542
+ x = self.conv_post(x)
543
+ fmap.append(x)
544
+ x = torch.flatten(x, 1, -1)
545
+
546
+ return x, fmap
547
+
548
+ class DiscriminatorP(torch.nn.Module):
549
+ def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
550
+ super(DiscriminatorP, self).__init__()
551
+ self.period = period
552
+ self.use_spectral_norm = use_spectral_norm
553
+ norm_f = spectral_norm if use_spectral_norm else weight_norm
554
+ self.convs = nn.ModuleList(
555
+ [
556
+ norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0),)),
557
+ norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0),)),
558
+ norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0),)),
559
+ norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0),)),
560
+ norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0),)),
561
+ ]
562
+ )
563
+ self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
564
+
565
+ def forward(self, x):
566
+ fmap = []
567
+ b, c, t = x.shape
568
+ if t % self.period != 0:
569
+ n_pad = self.period - (t % self.period)
570
+ x = F.pad(x, (0, n_pad), "reflect")
571
+ t = t + n_pad
572
+ x = x.view(b, c, t // self.period, self.period)
573
+
574
+ for l in self.convs:
575
+ x = l(x)
576
+ x = F.leaky_relu(x, modules.LRELU_SLOPE)
577
+ fmap.append(x)
578
+ x = self.conv_post(x)
579
+ fmap.append(x)
580
+ x = torch.flatten(x, 1, -1)
581
+
582
+ return x, fmap
lib/infer_pack/modules.py ADDED
@@ -0,0 +1,315 @@
1
+ import copy
2
+ import math
3
+ import numpy as np
4
+ import scipy
5
+ import torch
6
+ from torch import nn
7
+ from torch.nn import functional as F
8
+ from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
9
+ from torch.nn.utils import weight_norm, remove_weight_norm
10
+ from lib.infer_pack import commons
11
+ from lib.infer_pack.commons import init_weights, get_padding
12
+ from lib.infer_pack.transforms import piecewise_rational_quadratic_transform
13
+
14
+ LRELU_SLOPE = 0.1
15
+
16
+ class LayerNorm(nn.Module):
17
+ def __init__(self, channels, eps=1e-5):
18
+ super().__init__()
19
+ self.channels = channels
20
+ self.eps = eps
21
+ self.gamma = nn.Parameter(torch.ones(channels))
22
+ self.beta = nn.Parameter(torch.zeros(channels))
23
+
24
+ def forward(self, x):
25
+ x = x.transpose(1, -1)
26
+ x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
27
+ return x.transpose(1, -1)
28
+
29
+ class ConvReluNorm(nn.Module):
30
+ def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout):
31
+ super().__init__()
32
+ self.in_channels = in_channels
33
+ self.hidden_channels = hidden_channels
34
+ self.out_channels = out_channels
35
+ self.kernel_size = kernel_size
36
+ self.n_layers = n_layers
37
+ self.p_dropout = p_dropout
38
+ assert n_layers > 1, "Number of layers should be larger than 1."
39
+ self.conv_layers = nn.ModuleList()
40
+ self.norm_layers = nn.ModuleList()
41
+ self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size // 2))
42
+ self.norm_layers.append(LayerNorm(hidden_channels))
43
+ self.relu_drop = nn.Sequential(nn.ReLU(), nn.Dropout(p_dropout))
44
+ for _ in range(n_layers - 1):
45
+ self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size // 2))
46
+ self.norm_layers.append(LayerNorm(hidden_channels))
47
+ self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
48
+ self.proj.weight.data.zero_()
49
+ self.proj.bias.data.zero_()
50
+
51
+ def forward(self, x, x_mask):
52
+ x_org = x
53
+ for i in range(self.n_layers):
54
+ x = self.conv_layers[i](x * x_mask)
55
+ x = self.norm_layers[i](x)
56
+ x = self.relu_drop(x)
57
+ x = x_org + self.proj(x)
58
+ return x * x_mask
59
+
60
+ class DDSConv(nn.Module):
61
+ def __init__(self, channels, kernel_size, n_layers, p_dropout=0.0):
62
+ super().__init__()
63
+ self.channels = channels
64
+ self.kernel_size = kernel_size
65
+ self.n_layers = n_layers
66
+ self.p_dropout = p_dropout
67
+ self.drop = nn.Dropout(p_dropout)
68
+ self.convs_sep = nn.ModuleList()
69
+ self.convs_1x1 = nn.ModuleList()
70
+ self.norms_1 = nn.ModuleList()
71
+ self.norms_2 = nn.ModuleList()
72
+ for i in range(n_layers):
73
+ dilation = kernel_size**i
74
+ padding = (kernel_size * dilation - dilation) // 2  # "same" padding so the residual add keeps the sequence length
75
+ self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size, groups=channels, dilation=dilation, padding=padding))
76
+ self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
77
+ self.norms_1.append(LayerNorm(channels))
78
+ self.norms_2.append(LayerNorm(channels))
79
+
80
+ def forward(self, x, x_mask, g=None):
81
+ if g is not None: x = x + g
82
+ for i in range(self.n_layers):
83
+ y = self.convs_sep[i](x * x_mask)
84
+ y = self.norms_1[i](y)
85
+ y = F.gelu(y)
86
+ y = self.convs_1x1[i](y)
87
+ y = self.norms_2[i](y)
88
+ y = F.gelu(y)
89
+ y = self.drop(y)
90
+ x = x + y
91
+ return x * x_mask
92
+
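+ # WaveNet-style stack of dilated convolutions with gated tanh/sigmoid units;
+ # the conditioning signal is projected once by a shared cond_layer and sliced
+ # per layer inside forward().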
93
+ class WN(torch.nn.Module):
94
+ def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0):
95
+ super(WN, self).__init__()
96
+ assert kernel_size % 2 == 1
97
+ self.hidden_channels = hidden_channels
98
+ self.kernel_size = kernel_size
99
+ self.dilation_rate = dilation_rate
100
+ self.n_layers = n_layers
101
+ self.gin_channels = gin_channels
102
+ self.p_dropout = p_dropout
103
+
104
+ self.in_layers = torch.nn.ModuleList()
105
+ self.res_skip_layers = torch.nn.ModuleList()
106
+ self.drop = nn.Dropout(p_dropout)
107
+
108
+ if gin_channels != 0:
109
+ cond_layer = torch.nn.Conv1d(gin_channels, 2 * hidden_channels * n_layers, 1)
110
+ self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name="weight")
111
+
112
+ for i in range(n_layers):
113
+ dilation = dilation_rate**i
114
+ padding = int((kernel_size * dilation - dilation) / 2)
115
+ in_layer = torch.nn.Conv1d(hidden_channels, 2 * hidden_channels, kernel_size, dilation=dilation, padding=padding)
116
+ in_layer = torch.nn.utils.weight_norm(in_layer, name="weight")
117
+ self.in_layers.append(in_layer)
118
+
119
+ if i < n_layers - 1: res_skip_channels = 2 * hidden_channels
120
+ else: res_skip_channels = hidden_channels
121
+
122
+ res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
123
+ res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name="weight")
124
+ self.res_skip_layers.append(res_skip_layer)
125
+
126
+ def forward(self, x, x_mask, g=None, **kwargs):
127
+ output = torch.zeros_like(x)
128
+ n_channels_tensor = torch.IntTensor([self.hidden_channels])
129
+
130
+ if g is not None: g = self.cond_layer(g)
131
+
132
+ for i in range(self.n_layers):
133
+ x_in = self.in_layers[i](x)
134
+ if g is not None:
135
+ cond_offset = i * 2 * self.hidden_channels
136
+ g_l = g[:, cond_offset : cond_offset + 2 * self.hidden_channels, :]
137
+ else: g_l = torch.zeros_like(x_in)
138
+
139
+ acts = commons.fused_add_tanh_sigmoid_multiply(x_in, g_l, n_channels_tensor)
140
+ acts = self.drop(acts)
141
+
142
+ res_skip_acts = self.res_skip_layers[i](acts)
143
+ if i < self.n_layers - 1:
144
+ res_acts = res_skip_acts[:, : self.hidden_channels, :]
145
+ x = (x + res_acts) * x_mask
146
+ output = output + res_skip_acts[:, self.hidden_channels :, :]
147
+ else: output = output + res_skip_acts
148
+ return output * x_mask
149
+
150
+ def remove_weight_norm(self):
151
+ if self.gin_channels != 0: torch.nn.utils.remove_weight_norm(self.cond_layer)
152
+ for l in self.in_layers: torch.nn.utils.remove_weight_norm(l)
153
+ for l in self.res_skip_layers: torch.nn.utils.remove_weight_norm(l)
154
+
155
+ class ResBlock1(torch.nn.Module):
156
+ def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
157
+ super(ResBlock1, self).__init__()
158
+ self.convs1 = nn.ModuleList(
159
+ [
160
+ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], padding=get_padding(kernel_size, dilation[0]))),
161
+ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], padding=get_padding(kernel_size, dilation[1]))),
162
+ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2], padding=get_padding(kernel_size, dilation[2])))])
163
+ self.convs1.apply(init_weights)
164
+
165
+ self.convs2 = nn.ModuleList(
166
+ [
167
+ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, padding=get_padding(kernel_size, 1))),
168
+ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, padding=get_padding(kernel_size, 1))),
169
+ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, padding=get_padding(kernel_size, 1)))])
170
+ self.convs2.apply(init_weights)
171
+
172
+ def forward(self, x, x_mask=None):
173
+ for c1, c2 in zip(self.convs1, self.convs2):
174
+ xt = F.leaky_relu(x, LRELU_SLOPE)
175
+ if x_mask is not None: xt = xt * x_mask
176
+ xt = c1(xt)
177
+ xt = F.leaky_relu(xt, LRELU_SLOPE)
178
+ if x_mask is not None: xt = xt * x_mask
179
+ xt = c2(xt)
180
+ x = xt + x
181
+ if x_mask is not None:
182
+ x = x * x_mask
183
+ return x
184
+
185
+ def remove_weight_norm(self):
186
+ for l in self.convs1: remove_weight_norm(l)
187
+ for l in self.convs2: remove_weight_norm(l)
188
+
189
+ class ResBlock2(torch.nn.Module):
190
+ def __init__(self, channels, kernel_size=3, dilation=(1, 3)):
191
+ super(ResBlock2, self).__init__()
192
+ self.convs = nn.ModuleList([weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], padding=get_padding(kernel_size, dilation[0]))),weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], padding=get_padding(kernel_size, dilation[1])))])
193
+ self.convs.apply(init_weights)
194
+
195
+ def forward(self, x, x_mask=None):
196
+ for c in self.convs:
197
+ xt = F.leaky_relu(x, LRELU_SLOPE)
198
+ if x_mask is not None: xt = xt * x_mask
199
+ xt = c(xt)
200
+ x = xt + x
201
+ if x_mask is not None: x = x * x_mask
202
+ return x
203
+
204
+ def remove_weight_norm(self):
205
+ for l in self.convs: remove_weight_norm(l)
206
+
207
+ class Log(nn.Module):
208
+ def forward(self, x, x_mask, reverse=False, **kwargs):
209
+ if not reverse:
210
+ y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask
211
+ logdet = torch.sum(-y, [1, 2])
212
+ return y, logdet
213
+ else:
214
+ x = torch.exp(x) * x_mask
215
+ return x
216
+
217
+ class Flip(nn.Module):
218
+ def forward(self, x, *args, reverse=False, **kwargs):
219
+ x = torch.flip(x, [1])
220
+ if reverse: return x
221
+ logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device)
222
+ return x, logdet
223
+
224
+ class ElementwiseAffine(nn.Module):
225
+ def __init__(self, channels):
226
+ super().__init__()
227
+ self.channels = channels
228
+ self.m = nn.Parameter(torch.zeros(channels, 1))
229
+ self.logs = nn.Parameter(torch.zeros(channels, 1))
230
+
231
+ def forward(self, x, x_mask, reverse=False, **kwargs):
232
+ if not reverse:
233
+ y = self.m + torch.exp(self.logs) * x
234
+ y = y * x_mask
235
+ logdet = torch.sum(self.logs * x_mask, [1, 2])
236
+ return y, logdet
237
+ else:
238
+ x = (x - self.m) * torch.exp(-self.logs) * x_mask
239
+ return x
240
+
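+ # Affine coupling layer: half of the channels pass through unchanged and
+ # parameterize a (mean, log-scale) transform of the other half, keeping the
+ # flow invertible with a cheap log-determinant.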
241
+ class ResidualCouplingLayer(nn.Module):
242
+ def __init__(self, channels, hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=0, gin_channels=0, mean_only=False):
243
+ assert channels % 2 == 0, "channels should be divisible by 2"
244
+ super().__init__()
245
+ self.channels = channels
246
+ self.hidden_channels = hidden_channels
247
+ self.kernel_size = kernel_size
248
+ self.dilation_rate = dilation_rate
249
+ self.n_layers = n_layers
250
+ self.half_channels = channels // 2
251
+ self.mean_only = mean_only
252
+
253
+ self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
254
+ self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, gin_channels=gin_channels)
255
+ self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
256
+ self.post.weight.data.zero_()
257
+ self.post.bias.data.zero_()
258
+
259
+ def forward(self, x, x_mask, g=None, reverse=False):
260
+ x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
261
+ h = self.pre(x0) * x_mask
262
+ h = self.enc(h, x_mask, g=g)
263
+ stats = self.post(h) * x_mask
264
+ if not self.mean_only: m, logs = torch.split(stats, [self.half_channels] * 2, 1)
265
+ else:
266
+ m = stats
267
+ logs = torch.zeros_like(m)
268
+
269
+ if not reverse:
270
+ x1 = m + x1 * torch.exp(logs) * x_mask
271
+ x = torch.cat([x0, x1], 1)
272
+ logdet = torch.sum(logs, [1, 2])
273
+ return x, logdet
274
+ else:
275
+ x1 = (x1 - m) * torch.exp(-logs) * x_mask
276
+ x = torch.cat([x0, x1], 1)
277
+ return x
278
+
279
+ def remove_weight_norm(self): self.enc.remove_weight_norm()
280
+
281
+ class ConvFlow(nn.Module):
282
+ def __init__(self, in_channels, filter_channels, kernel_size, n_layers, num_bins=10, tail_bound=5.0):
283
+ super().__init__()
284
+ self.in_channels = in_channels
285
+ self.filter_channels = filter_channels
286
+ self.kernel_size = kernel_size
287
+ self.n_layers = n_layers
288
+ self.num_bins = num_bins
289
+ self.tail_bound = tail_bound
290
+ self.half_channels = in_channels // 2
291
+
292
+ self.pre = nn.Conv1d(self.half_channels, filter_channels, 1)
293
+ self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.0)
294
+ self.proj = nn.Conv1d(filter_channels, self.half_channels * (num_bins * 3 - 1), 1)
295
+ self.proj.weight.data.zero_()
296
+ self.proj.bias.data.zero_()
297
+
298
+ def forward(self, x, x_mask, g=None, reverse=False):
299
+ x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
300
+ h = self.pre(x0)
301
+ h = self.convs(h, x_mask, g=g)
302
+ h = self.proj(h) * x_mask
303
+
304
+ b, c, t = x0.shape
305
+ h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2)
306
+
307
+ unnormalized_widths = h[..., : self.num_bins] / math.sqrt(self.filter_channels)
308
+ unnormalized_heights = h[..., self.num_bins : 2 * self.num_bins] / math.sqrt(self.filter_channels)
309
+ unnormalized_derivatives = h[..., 2 * self.num_bins :]
310
+
311
+ x1, logabsdet = piecewise_rational_quadratic_transform(x1, unnormalized_widths, unnormalized_heights, unnormalized_derivatives, inverse=reverse, tails="linear", tail_bound=self.tail_bound)
312
+
313
+ x = torch.cat([x0, x1], 1) * x_mask
314
+ logdet = torch.sum(logabsdet * x_mask, [1, 2])
315
+ return (x, logdet) if not reverse else x
lib/infer_pack/onnx_inference.py ADDED
@@ -0,0 +1,67 @@
1
+ import onnxruntime
2
+ import librosa
3
+ import numpy as np
4
+ from lib.infer_pack.modules.F0Predictor.PMF0Predictor import PMF0Predictor
5
+
6
+ VEC_PATH = "pretrained/vec-768-layer-12.onnx"
7
+ MAX_LENGTH = 50.0
8
+ F0_MIN = 50
9
+ F0_MAX = 1100
10
+ F0_MEL_MIN = 1127 * np.log(1 + F0_MIN / 700)
11
+ F0_MEL_MAX = 1127 * np.log(1 + F0_MAX / 700)
12
+ RESAMPLING_RATE = 16000
13
+
14
+ class ContentVectorModel:
15
+ def __init__(self, vector_path=VEC_PATH):
16
+ providers = ["CPUExecutionProvider"]
17
+ self.model = onnxruntime.InferenceSession(vector_path, providers=providers)
18
+
19
+ def __call__(self, audio_wave): return self.process_audio(audio_wave)
20
+
21
+ def process_audio(self, audio_wave):
22
+ features = audio_wave.mean(-1) if audio_wave.ndim == 2 else audio_wave
23
+ features = np.expand_dims(np.expand_dims(features, 0), 0)
24
+ onnx_input = {self.model.get_inputs()[0].name: features}
25
+ logits = self.model.run(None, onnx_input)[0]
26
+ return logits.transpose(0, 2, 1)
27
+
28
+
29
+ class OnnxRVC:
30
+ def __init__(self, model_path, sampling_rate=40000, hop_size=512, vector_path=VEC_PATH):
31
+ self.vec_model = ContentVectorModel(vector_path)  # vector_path already ends in .onnx; appending the extension again broke the path
32
+ providers = ["CPUExecutionProvider"]
33
+ self.model = onnxruntime.InferenceSession(model_path, providers=providers)
34
+ self.sampling_rate = sampling_rate
35
+ self.hop_size = hop_size
36
+
37
+ def forward(self, hubert, hubert_length, pitch, pitchf, ds, rnd):
38
+ onnx_input = {self.model.get_inputs()[0].name: hubert, self.model.get_inputs()[1].name: hubert_length, self.model.get_inputs()[2].name: pitch, self.model.get_inputs()[3].name: pitchf, self.model.get_inputs()[4].name: ds, self.model.get_inputs()[5].name: rnd}
39
+ return (self.model.run(None, onnx_input)[0] * 32767).astype(np.int16)
40
+
41
+ def inference(self, raw_path, sid, f0_method="pm", f0_up_key=0, pad_time=0.5, cr_threshold=0.02):
42
+ f0_predictor = PMF0Predictor(hop_length=self.hop_size, sampling_rate=self.sampling_rate, threshold=cr_threshold)
43
+ wav, sr = librosa.load(raw_path, sr=self.sampling_rate)
44
+ org_length = len(wav)
45
+ if org_length / sr > MAX_LENGTH: raise RuntimeError(f"Input exceeds the maximum length of {MAX_LENGTH} s")
46
+
47
+ wav16k = librosa.resample(wav, orig_sr=sr, target_sr=RESAMPLING_RATE)
48
+ hubert = self.vec_model(wav16k)
49
+ hubert = np.repeat(hubert, 2, axis=2).transpose(0, 2, 1).astype(np.float32)
50
+ hubert_length = hubert.shape[1]
51
+
52
+ pitchf = f0_predictor.compute_f0(wav, hubert_length)
53
+ pitchf *= 2 ** (f0_up_key / 12)
54
+ pitch = pitchf.copy()
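+ # Quantize f0 on a mel scale into coarse bins 1..255 for the pitch embedding;
+ # the continuous pitchf track separately drives the NSF source module.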
55
+ f0_mel = 1127 * np.log(1 + pitch / 700)
56
+ f0_mel = np.clip(f0_mel - F0_MEL_MIN, 0, None) * 254 / (F0_MEL_MAX - F0_MEL_MIN) + 1
57
+ pitch = np.rint(f0_mel).astype(np.int64)
58
+
59
+ pitchf = pitchf.reshape(1, -1).astype(np.float32)
60
+ pitch = pitch.reshape(1, -1)
61
+ ds = np.array([sid]).astype(np.int64)
62
+ rnd = np.random.randn(1, 192, hubert_length).astype(np.float32)
63
+ hubert_length = np.array([hubert_length]).astype(np.int64)
64
+
65
+ out_wav = self.forward(hubert, hubert_length, pitch, pitchf, ds, rnd).squeeze()
66
+ out_wav = np.pad(out_wav, (0, 2 * self.hop_size), "constant")
67
+ return out_wav[:org_length]
lib/infer_pack/transforms.py ADDED
@@ -0,0 +1,115 @@
1
+ import torch
2
+ from torch.nn import functional as F
3
+ import numpy as np
4
+
5
+ DEFAULT_MIN_BIN_WIDTH = 1e-3
6
+ DEFAULT_MIN_BIN_HEIGHT = 1e-3
7
+ DEFAULT_MIN_DERIVATIVE = 1e-3
8
+
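+ # Monotonic rational-quadratic spline flow (Durkan et al., "Neural Spline
+ # Flows"); "linear" tails extend the spline with the identity map outside
+ # [-tail_bound, tail_bound].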
9
+ def piecewise_rational_quadratic_transform(inputs, unnormalized_widths, unnormalized_heights, unnormalized_derivatives, inverse=False, tails=None, tail_bound=1.0, min_bin_width=DEFAULT_MIN_BIN_WIDTH, min_bin_height=DEFAULT_MIN_BIN_HEIGHT, min_derivative=DEFAULT_MIN_DERIVATIVE):
10
+ if tails is None:
11
+ spline_fn = rational_quadratic_spline
12
+ spline_kwargs = {}
13
+ else:
14
+ spline_fn = unconstrained_rational_quadratic_spline
15
+ spline_kwargs = {"tails": tails, "tail_bound": tail_bound}
16
+
17
+ return spline_fn(inputs=inputs, unnormalized_widths=unnormalized_widths, unnormalized_heights=unnormalized_heights, unnormalized_derivatives=unnormalized_derivatives, inverse=inverse, min_bin_width=min_bin_width, min_bin_height=min_bin_height, min_derivative=min_derivative, **spline_kwargs)
18
+
19
+ def searchsorted(bin_locations, inputs, eps=1e-6):
20
+ bin_locations[..., -1] += eps
21
+ return torch.sum(inputs[..., None] >= bin_locations, dim=-1) - 1
22
+
23
+ def unconstrained_rational_quadratic_spline(inputs, unnormalized_widths, unnormalized_heights, unnormalized_derivatives, inverse=False, tails="linear", tail_bound=1.0, min_bin_width=DEFAULT_MIN_BIN_WIDTH, min_bin_height=DEFAULT_MIN_BIN_HEIGHT, min_derivative=DEFAULT_MIN_DERIVATIVE):
24
+ if tails != "linear": raise RuntimeError(f"{tails} tails are not implemented.")
25
+
26
+ unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1))
27
+ constant = np.log(np.exp(1 - min_derivative) - 1)
28
+ unnormalized_derivatives[..., 0] = constant
29
+ unnormalized_derivatives[..., -1] = constant
30
+
31
+ inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound)
32
+ outside_interval_mask = ~inside_interval_mask
33
+ outputs = torch.where(outside_interval_mask, inputs, torch.zeros_like(inputs))
34
+ logabsdet = torch.zeros_like(inputs)
35
+
36
+ inside_outputs, inside_logabsdet = rational_quadratic_spline(
37
+ inputs=inputs[inside_interval_mask],
38
+ unnormalized_widths=unnormalized_widths[inside_interval_mask, :],
39
+ unnormalized_heights=unnormalized_heights[inside_interval_mask, :],
40
+ unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :],
41
+ inverse=inverse, left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound,
42
+ min_bin_width=min_bin_width, min_bin_height=min_bin_height, min_derivative=min_derivative)
43
+
44
+ outputs[inside_interval_mask] = inside_outputs
45
+ logabsdet[inside_interval_mask] = inside_logabsdet
46
+
47
+ return outputs, logabsdet
48
+
49
+ def rational_quadratic_spline(inputs, unnormalized_widths, unnormalized_heights, unnormalized_derivatives, inverse=False, left=0.0, right=1.0, bottom=0.0, top=1.0, min_bin_width=DEFAULT_MIN_BIN_WIDTH, min_bin_height=DEFAULT_MIN_BIN_HEIGHT, min_derivative=DEFAULT_MIN_DERIVATIVE):
50
+ num_bins = unnormalized_widths.shape[-1]
51
+
52
+ if min_bin_width * num_bins > 1.0: raise ValueError("Minimal bin width too large for the number of bins")
53
+ if min_bin_height * num_bins > 1.0: raise ValueError("Minimal bin height too large for the number of bins")
54
+
55
+ widths, heights = compute_widths_and_heights(unnormalized_widths, unnormalized_heights, min_bin_width, min_bin_height, num_bins, left, right, bottom, top)
56
+ # Prepend the left/bottom endpoint so there are num_bins + 1 knot positions
+ cumwidths = F.pad(widths.cumsum(dim=-1), pad=(1, 0), mode="constant", value=0.0) + left
+ cumheights = F.pad(heights.cumsum(dim=-1), pad=(1, 0), mode="constant", value=0.0) + bottom
57
+ cumwidths[..., 0] = left
58
+ cumwidths[..., -1] = right
59
+ cumheights[..., 0] = bottom
60
+ cumheights[..., -1] = top
61
+ widths, heights = cumwidths[..., 1:] - cumwidths[..., :-1], cumheights[..., 1:] - cumheights[..., :-1]
62
+
63
+ derivatives = min_derivative + F.softplus(unnormalized_derivatives)
64
+
65
+ if inverse: bin_idx = searchsorted(cumheights, inputs)[..., None]
66
+ else: bin_idx = searchsorted(cumwidths, inputs)[..., None]
67
+
68
+ gather_args = (-1, bin_idx)
69
+ input_cumwidths, input_bin_widths, input_cumheights, input_delta, input_derivatives, input_derivatives_plus_one, input_heights = map(
70
+ lambda tensor: tensor.gather(*gather_args)[..., 0],
71
+ (cumwidths, widths, cumheights, heights / widths, derivatives, derivatives[..., 1:], heights))
72
+
73
+ if inverse: outputs, logabsdet = inverse_rational_quadratic_spline(inputs, input_cumheights, input_heights, input_derivatives, input_derivatives_plus_one, input_delta, input_bin_widths, input_cumwidths)
74
+ else: outputs, logabsdet = direct_rational_quadratic_spline(inputs, input_cumwidths, input_bin_widths, input_cumheights, input_heights, input_derivatives, input_derivatives_plus_one, input_delta)
75
+
76
+ return outputs, logabsdet
77
+
78
+ def compute_widths_and_heights(unnormalized_widths, unnormalized_heights, min_bin_width, min_bin_height, num_bins, left, right, bottom, top):
79
+ widths = F.softmax(unnormalized_widths, dim=-1)
80
+ widths = min_bin_width + (1 - min_bin_width * num_bins) * widths
81
+ widths = (right - left) * widths  # scale bin fractions to the interval width; the left offset is applied to the cumulative knots, not per bin
82
+
83
+ heights = F.softmax(unnormalized_heights, dim=-1)
84
+ heights = min_bin_height + (1 - min_bin_height * num_bins) * heights
85
+ heights = (top - bottom) * heights
86
+
87
+ return widths, heights
88
+
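+ # Inverting a rational-quadratic segment reduces to a quadratic in theta; the
+ # root is taken in the numerically stable form 2c / (-b - sqrt(discriminant)).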
89
+ def inverse_rational_quadratic_spline(inputs, input_cumheights, input_heights, input_derivatives, input_derivatives_plus_one, input_delta, input_bin_widths, input_cumwidths):
90
+ a = (inputs - input_cumheights) * (input_derivatives + input_derivatives_plus_one - 2 * input_delta) + input_heights * (input_delta - input_derivatives)
91
+ b = input_heights * input_derivatives - (inputs - input_cumheights) * (input_derivatives + input_derivatives_plus_one - 2 * input_delta)
92
+ c = -input_delta * (inputs - input_cumheights)
93
+
94
+ discriminant = b.pow(2) - 4 * a * c
95
+ assert (discriminant >= 0).all()
96
+
97
+ root = (2 * c) / (-b - torch.sqrt(discriminant))
98
+ outputs = root * input_bin_widths + input_cumwidths
99
+ theta_one_minus_theta = root * (1 - root)
100
+ denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta)* theta_one_minus_theta)
101
+ derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2)+ 2 * input_delta * theta_one_minus_theta+ input_derivatives * (1 - root).pow(2))
102
+ logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
103
+
104
+ return outputs, -logabsdet
105
+
+ def direct_rational_quadratic_spline(inputs, input_cumwidths, input_bin_widths, input_cumheights, input_heights, input_derivatives, input_derivatives_plus_one, input_delta):
+     # Forward pass of the monotonic rational-quadratic spline of
+     # Durkan et al., "Neural Spline Flows".
+     theta = (inputs - input_cumwidths) / input_bin_widths
+     theta_one_minus_theta = theta * (1 - theta)
+
+     numerator = input_heights * (input_delta * theta.pow(2) + input_derivatives * theta_one_minus_theta)
+     denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) * theta_one_minus_theta)
+     outputs = input_cumheights + numerator / denominator
+
+     derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * theta.pow(2) + 2 * input_delta * theta_one_minus_theta + input_derivatives * (1 - theta).pow(2))
+     logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
+
+     return outputs, logabsdet
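As a quick sanity check on these formulas (a minimal sketch, not part of the commit; the import path is assumed): when both knot derivatives equal the bin slope, the spline is exactly linear inside the bin, so the output and the log-Jacobian have closed forms.

import torch
from lib.infer_pack.transforms import direct_rational_quadratic_spline  # assumed import path

x = torch.tensor([0.25, 0.5, 0.75])
one = torch.ones_like(x)
# One bin mapping [0, 1] -> [0, 2] with both knot derivatives equal to the
# slope (2): the rational-quadratic map degenerates to y = 2x.
outputs, logabsdet = direct_rational_quadratic_spline(
    inputs=x,
    input_cumwidths=torch.zeros_like(x),   # left knot of the input bin
    input_bin_widths=one,                  # input bin width
    input_cumheights=torch.zeros_like(x),  # bottom knot of the output bin
    input_heights=2 * one,                 # output bin height
    input_derivatives=2 * one,             # derivative at the left knot
    input_derivatives_plus_one=2 * one,    # derivative at the right knot
    input_delta=2 * one,                   # bin slope = heights / widths
)
assert torch.allclose(outputs, 2 * x)                  # linear map inside the bin
assert torch.allclose(logabsdet, torch.log(2 * one))   # log|dy/dx| = log 2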
models/FernandoAlonso/FernandoAlonso/added_IVF582_Flat_nprobe_1_fernando2_v2.index ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9c5d70bed6e4bed0b54eaef9c1f2dd180fda94035287baafadb99a456b8cbd52
+ size 71801099
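All of the .pth weights and .index files added below are stored as Git LFS pointers like the one above: three lines recording the pointer spec version, the SHA-256 of the actual blob, and its size in bytes. Once the real file is fetched, it can be checked against its pointer, e.g. (a hedged sketch; verify_lfs_blob and the example call are illustrative, not part of this repo):

import hashlib
import os

def verify_lfs_blob(blob_path: str, expected_oid: str, expected_size: int) -> bool:
    # Cheap size check first, then the SHA-256 digest of the contents.
    if os.path.getsize(blob_path) != expected_size:
        return False
    h = hashlib.sha256()
    with open(blob_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest() == expected_oid

# e.g. for the index above:
# verify_lfs_blob("added_IVF582_Flat_nprobe_1_fernando2_v2.index",
#                 "9c5d70bed6e4bed0b54eaef9c1f2dd180fda94035287baafadb99a456b8cbd52",
#                 71801099)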
models/FernandoAlonso/FernandoAlonso/fernando2.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3926c2925ebe47d0c7a8d5b947813f0866a08d2970a10cb69d64f1b6e2371bbc
+ size 55224197
models/HolasoyGerman/HolasoyGerman/HolasoyGerman.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:db9c812c5ecd6e7afa5b0bee0300741ba87731097c6449b64924d46c10644247
+ size 55226033
models/HolasoyGerman/HolasoyGerman/added_IVF3117_Flat_nprobe_1_HolasoyGerman_v2.index ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a11850d919513a315e2a4ec41586a74c650f0ccaf3fc16d8aebff4e63ce07dc4
+ size 384023779
models/HolasoyGerman/model_info.json ADDED
@@ -0,0 +1,9 @@
+ {
+     "HolasoyGerman": {
+         "title": "HolasoyGerman",
+         "model_path": "HolasoyGerman.pth",
+         "feature_retrieval_library": "added_IVF3117_Flat_nprobe_1_HolasoyGerman_v2.index",
+         "model_path_checksum": "74633371eb2c2a75292633ee8922fddc",
+         "index_path_checksum": "febffc21f866063b40900cd068d379b9"
+     }
+ }
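Each model_info.json maps one voice name to its weights file, its FAISS feature-retrieval index, and two 32-hex-digit checksums (presumably MD5; that is an assumption, the hashing code is not shown in this diff). A loader could resolve the on-disk paths roughly like this (a minimal sketch; load_voice is hypothetical, the real loading logic lives in app.py):

import json
import os

def load_voice(model_dir: str):
    """Hypothetical helper: resolve the files described by model_info.json."""
    with open(os.path.join(model_dir, "model_info.json"), encoding="utf-8") as f:
        info = json.load(f)
    name, meta = next(iter(info.items()))
    # Weights and index live one level deeper, in a folder named after
    # the voice (e.g. models/HolasoyGerman/HolasoyGerman/...).
    pth_path = os.path.join(model_dir, name, meta["model_path"])
    index_path = os.path.join(model_dir, name, meta["feature_retrieval_library"])
    return name, pth_path, index_path

# e.g. load_voice("models/HolasoyGerman")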
models/Homer/Homer/HomerEsp.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:309244d07dd5f611f4bca2415a933a6bf499d1a803cd8df45e005d8256373b65
+ size 55027130
models/Homer/Homer/added_IVF360_Flat_nprobe_1_HomerEsp_v1.index ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a3b3375a8e01675a1a46439af33ff4941f52eb9592b8784c154dcd55cf9eba03
+ size 14861971
models/Homer/model_info.json ADDED
@@ -0,0 +1,9 @@
+ {
+     "Homer": {
+         "title": "Homer",
+         "model_path": "HomerEsp.pth",
+         "feature_retrieval_library": "added_IVF360_Flat_nprobe_1_HomerEsp_v1.index",
+         "model_path_checksum": "7df9a9cdf205fe13f5ea6d73d7e9fd41",
+         "index_path_checksum": "5d554ed74ef44a057d4f997dabe8c3ea"
+     }
+ }
models/Ibai/Ibai/Ibai.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aa102e749f66ede51b3bc43a4cceaeeaac7abd26c9f20348a233d042349d0467
+ size 55227410
models/Ibai/Ibai/added_IVF4601_Flat_nprobe_1_Ibai_v2.index ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:177232ae8f7428fbc925719e11fcfd73a48b0919c3f94fd64eae6ff9f815c440
+ size 566889539
models/Ibai/model_info.json ADDED
@@ -0,0 +1,9 @@
+ {
+     "Ibai": {
+         "title": "Ibai",
+         "model_path": "Ibai.pth",
+         "feature_retrieval_library": "added_IVF4601_Flat_nprobe_1_Ibai_v2.index",
+         "model_path_checksum": "098e96b7158edd654d0e269959ac9a20",
+         "index_path_checksum": "b16da2fa68a6341f8fc409a83db68dcf"
+     }
+ }
models/IlloJuan/IlloJuan/IlloJuan.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:80c3229a3b5ee888ffb9f8b85385de9705966b156ea4471619f25186318f55b4
+ size 55223738
models/IlloJuan/IlloJuan/added_IVF593_Flat_nprobe_1_IlloJuan_v2.index ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e3bce6d0a1f554f5b5e38bb8f5d72e15a05c2383261f72e4fb5c06c2d714763f
+ size 73162459
models/IlloJuan/model_info.json ADDED
@@ -0,0 +1,9 @@
+ {
+     "IlloJuan": {
+         "title": "IlloJuan",
+         "model_path": "IlloJuan.pth",
+         "feature_retrieval_library": "added_IVF593_Flat_nprobe_1_IlloJuan_v2.index",
+         "model_path_checksum": "06a20ffd281284950537ebdbd227b8a9",
+         "index_path_checksum": "fcae1daa14cc24d862ca09216a02595c"
+     }
+ }
models/Quevedo/Quevedo/added_IVF2301_Flat_nprobe_10.index ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0fdfabf3c2a559c0c5649f9991e667157bb14440a49860dce02e06ec26a34846
+ size 95005027
models/Quevedo/Quevedo/quevedo.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:071f9b5e30ae0c219b47de25a9300b4097f5a7d8025946088ce5d505d9ae6227
+ size 55026095
models/Quevedo/model_info.json ADDED
@@ -0,0 +1,9 @@
+ {
+     "Quevedo": {
+         "title": "Quevedo",
+         "model_path": "quevedo.pth",
+         "feature_retrieval_library": "added_IVF2301_Flat_nprobe_10.index",
+         "model_path_checksum": "6325e08a29a9cbfd0625dfe34be24a0c",
+         "index_path_checksum": "924db9553a0a70538a53e625fbadde40"
+     }
+ }
models/Shadoune666/Shadoune666/Shadoune666.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1447dd914d38c33b377b913c5ebc5f9bddf5a71195e2bb9b400af9bde6735e91
+ size 55225115
models/Shadoune666/Shadoune666/added_IVF716_Flat_nprobe_1_Shadoune666_v2.index ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bf519d09b556d5052f68a0be8628d10071e294de284291810b9ed88d2f8e5981
+ size 88328379
models/Shadoune666/model_info.json ADDED
@@ -0,0 +1,9 @@
+ {
+     "Shadoune666": {
+         "title": "Shadoune666",
+         "model_path": "Shadoune666.pth",
+         "feature_retrieval_library": "added_IVF716_Flat_nprobe_1_Shadoune666_v2.index",
+         "model_path_checksum": "7bca067c9055104ebea2bf3f6c7bfde4",
+         "index_path_checksum": "b23d61f502025b4f9d967e4de1935d0b"
+     }
+ }
models/Spreen/Spreen/Spreen.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4be95ea3bb27866c12fafd46ee9997165d025f50e35260b23e2c175c942f6cff
+ size 55216420
models/Spreen/Spreen/added_IVF4737_Flat_nprobe_1_Spreen_v2.index ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:669ca397e95bf51ff0e3f1aabe1ca60c83a5a6c85653ba0f1e550089c63b4305
+ size 583660139
models/Spreen/model_info.json ADDED
@@ -0,0 +1,9 @@
+ {
+     "Spreen": {
+         "title": "Spreen",
+         "model_path": "Spreen.pth",
+         "feature_retrieval_library": "added_IVF4737_Flat_nprobe_1_Spreen_v2.index",
+         "model_path_checksum": "dbdb20d98928308c2d048444efb34b77",
+         "index_path_checksum": "213bbb9c387426fadc97c41e97f34b0a"
+     }
+ }
models/Totakeke/Totakeke/added_IVF256_Flat_nprobe_1_full_totakeke_v2.index ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b88bd6d822fa8391fa2ca6dd6e0c6eac8ac031385519da3fd87d2a6e8f272151
+ size 31588619
models/Totakeke/Totakeke/full_totakeke_e150_s44400.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a3fb774cbbb74ae6005a87e370fc01337d3a24f7c5bb8c0962b335e4bd69488b
+ size 57590901
models/Totakeke/model_info.json ADDED
@@ -0,0 +1,9 @@
+ {
+     "Totakeke": {
+         "title": "Totakeke",
+         "model_path": "full_totakeke_e150_s44400.pth",
+         "feature_retrieval_library": "added_IVF256_Flat_nprobe_1_full_totakeke_v2.index",
+         "model_path_checksum": "ccb3a07fd141c6e918eca8a969d95c5d",
+         "index_path_checksum": "d4b2d2662efdc0bcb8f3610a686525c9"
+     }
+ }
models/Vegetta777/Vegetta777/Vegetta777.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f13483615d05c1b31a4eb0fbee35ac5dca9f9a48a36a0165b947c08303ce75e5
+ size 55218256
models/Vegetta777/Vegetta777/added_IVF2021_Flat_nprobe_1_Vegetta777_v2.index ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dd713395db0957eb626a40d096ee29ec8bebe19fac3d9ce356a3e587920c71d8
+ size 248990419
models/Vegetta777/model_info.json ADDED
@@ -0,0 +1,9 @@
+ {
+     "Vegetta777": {
+         "title": "Vegetta777",
+         "model_path": "Vegetta777.pth",
+         "feature_retrieval_library": "added_IVF2021_Flat_nprobe_1_Vegetta777_v2.index",
+         "model_path_checksum": "9bc80abfc506e98056e1db7b10000dea",
+         "index_path_checksum": "5a53e6f6a997e892e5fb26a7a6959550"
+     }
+ }
models/Villager/Villager/added_IVF81_Flat_nprobe_1_v2.index ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d8b8f5b445b1a1c3ee7155f3a121075399ca5b5a01032c459168b739c7c633e5
+ size 10013219
models/Villager/Villager/villager.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c2d94a1084bc9d65cd60ab7fb229a83476e79f8b75ec89e7343d0b9352ea1739
+ size 55223738
models/Villager/model_info.json ADDED
@@ -0,0 +1,9 @@
+ {
+     "Villager": {
+         "title": "Villager",
+         "model_path": "villager.pth",
+         "feature_retrieval_library": "added_IVF81_Flat_nprobe_1_v2.index",
+         "model_path_checksum": "587b2be8f6fa60f7533dddf1a007e2fa",
+         "index_path_checksum": "13ef69f0f2181f77d45c4d36d7364f2b"
+     }
+ }
models/WalterWhite/WalterWhite/added_IVF458_Flat_nprobe_1_ww2_v2.index ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c2030573a1685644ec51b816bb93194d813a30aa7e066fe27b849dc1e80f7706
+ size 56527379