TIMBOVILL committed
Commit 434756a
1 parent: d9f92fc

Upload 9 files
rvc/lib/tools/gdown.py ADDED
@@ -0,0 +1,402 @@
+ from __future__ import print_function
+ 
+ import json
+ import os
+ import os.path as osp
+ import re
+ import shutil
+ import sys
+ import tempfile
+ import textwrap
+ import time
+ import warnings
+ 
+ import requests
+ import six
+ import tqdm
+ from six.moves import urllib_parse
+ 
+ 
+ def indent(text, prefix):
+     def prefixed_lines():
+         for line in text.splitlines(True):
+             yield (prefix + line if line.strip() else line)
+ 
+     return "".join(prefixed_lines())
+ 
+ 
+ class FileURLRetrievalError(Exception):
+     pass
+ 
+ 
+ class FolderContentsMaximumLimitError(Exception):
+     pass
+ 
+ 
+ def parse_url(url, warning=True):
+     """Parse URLs especially for Google Drive links.
+ 
+     file_id: ID of file on Google Drive.
+     is_download_link: Flag if it is download link of Google Drive.
+     """
+     parsed = urllib_parse.urlparse(url)
+     query = urllib_parse.parse_qs(parsed.query)
+     is_gdrive = parsed.hostname in ["drive.google.com", "docs.google.com"]
+     is_download_link = parsed.path.endswith("/uc")
+ 
+     if not is_gdrive:
+         return is_gdrive, is_download_link
+ 
+     file_id = None
+     if "id" in query:
+         file_ids = query["id"]
+         if len(file_ids) == 1:
+             file_id = file_ids[0]
+     else:
+         patterns = [
+             r"^/file/d/(.*?)/(edit|view)$",
+             r"^/file/u/[0-9]+/d/(.*?)/(edit|view)$",
+             r"^/document/d/(.*?)/(edit|htmlview|view)$",
+             r"^/document/u/[0-9]+/d/(.*?)/(edit|htmlview|view)$",
+             r"^/presentation/d/(.*?)/(edit|htmlview|view)$",
+             r"^/presentation/u/[0-9]+/d/(.*?)/(edit|htmlview|view)$",
+             r"^/spreadsheets/d/(.*?)/(edit|htmlview|view)$",
+             r"^/spreadsheets/u/[0-9]+/d/(.*?)/(edit|htmlview|view)$",
+         ]
+         for pattern in patterns:
+             match = re.match(pattern, parsed.path)
+             if match:
+                 file_id = match.groups()[0]
+                 break
+ 
+     if warning and not is_download_link:
+         warnings.warn(
+             "You specified a Google Drive link that is not the correct link "
+             "to download a file. You might want to try `--fuzzy` option "
+             "or the following url: {url}".format(
+                 url="https://drive.google.com/uc?id={}".format(file_id)
+             )
+         )
+ 
+     return file_id, is_download_link
+ 
+ 
+ CHUNK_SIZE = 512 * 1024  # 512KB
+ home = osp.expanduser("~")
+ 
+ 
+ def get_url_from_gdrive_confirmation(contents):
+     url = ""
+     m = re.search(r'href="(\/uc\?export=download[^"]+)', contents)
+     if m:
+         url = "https://docs.google.com" + m.groups()[0]
+         url = url.replace("&amp;", "&")
+         return url
+ 
+     m = re.search(r'href="/open\?id=([^"]+)"', contents)
+     if m:
+         url = m.groups()[0]
+         uuid = re.search(
+             r'<input\s+type="hidden"\s+name="uuid"\s+value="([^"]+)"', contents
+         )
+         uuid = uuid.groups()[0]
+         url = (
+             "https://drive.usercontent.google.com/download?id="
+             + url
+             + "&confirm=t&uuid="
+             + uuid
+         )
+         return url
+ 
+     m = re.search(r'"downloadUrl":"([^"]+)', contents)
+     if m:
+         url = m.groups()[0]
+         url = url.replace("\\u003d", "=")
+         url = url.replace("\\u0026", "&")
+         return url
+ 
+     m = re.search(r'<p class="uc-error-subcaption">(.*)</p>', contents)
+     if m:
+         error = m.groups()[0]
+         raise FileURLRetrievalError(error)
+ 
+     raise FileURLRetrievalError(
+         "Cannot retrieve the public link of the file. "
+         "You may need to change the permission to "
+         "'Anyone with the link', or have had many accesses."
+     )
+ 
+ 
+ def _get_session(proxy, use_cookies, return_cookies_file=False):
+     sess = requests.session()
+ 
+     sess.headers.update(
+         {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6)"}
+     )
+ 
+     if proxy is not None:
+         sess.proxies = {"http": proxy, "https": proxy}
+         print("Using proxy:", proxy, file=sys.stderr)
+ 
+     # Load cookies if they exist
+     cookies_file = osp.join(home, ".cache/gdown/cookies.json")
+     if osp.exists(cookies_file) and use_cookies:
+         with open(cookies_file) as f:
+             cookies = json.load(f)
+         for k, v in cookies:
+             sess.cookies[k] = v
+ 
+     if return_cookies_file:
+         return sess, cookies_file
+     else:
+         return sess
+ 
+ 
+ def download(
+     url=None,
+     output=None,
+     quiet=False,
+     proxy=None,
+     speed=None,
+     use_cookies=True,
+     verify=True,
+     id=None,
+     fuzzy=True,
+     resume=False,
+     format=None,
+ ):
+     """Download file from URL.
+ 
+     Parameters
+     ----------
+     url: str
+         URL. Google Drive URL is also supported.
+     output: str
+         Output filename. Default is basename of URL.
+     quiet: bool
+         Suppress terminal output. Default is False.
+     proxy: str
+         Proxy.
+     speed: float
+         Download byte size per second (e.g., 256KB/s = 256 * 1024).
+     use_cookies: bool
+         Flag to use cookies. Default is True.
+     verify: bool or string
+         Either a bool, in which case it controls whether the server's TLS
+         certificate is verified, or a string, in which case it must be a path
+         to a CA bundle to use. Default is True.
+     id: str
+         Google Drive's file ID.
+     fuzzy: bool
+         Fuzzy extraction of Google Drive's file ID. Default is True.
+     resume: bool
+         Resume the download from existing tmp file if possible.
+         Default is False.
+     format: str, optional
+         Format of Google Docs, Spreadsheets and Slides. Default is:
+         - Google Docs: 'docx'
+         - Google Spreadsheet: 'xlsx'
+         - Google Slides: 'pptx'
+ 
+     Returns
+     -------
+     output: str
+         Output filename.
+     """
+     if not (id is None) ^ (url is None):
+         raise ValueError("Either url or id has to be specified")
+     if id is not None:
+         url = "https://drive.google.com/uc?id={id}".format(id=id)
+ 
+     url_origin = url
+ 
+     sess, cookies_file = _get_session(
+         proxy=proxy, use_cookies=use_cookies, return_cookies_file=True
+     )
+ 
+     gdrive_file_id, is_gdrive_download_link = parse_url(url, warning=not fuzzy)
+ 
+     if fuzzy and gdrive_file_id:
+         # overwrite the url with fuzzy match of a file id
+         url = "https://drive.google.com/uc?id={id}".format(id=gdrive_file_id)
+         url_origin = url
+         is_gdrive_download_link = True
+ 
+     while True:
+         res = sess.get(url, stream=True, verify=verify)
+ 
+         if url == url_origin and res.status_code == 500:
+             # The file could be Google Docs or Spreadsheets.
+             url = "https://drive.google.com/open?id={id}".format(id=gdrive_file_id)
+             continue
+ 
+         if res.headers["Content-Type"].startswith("text/html"):
+             m = re.search("<title>(.+)</title>", res.text)
+             if m and m.groups()[0].endswith(" - Google Docs"):
+                 url = (
+                     "https://docs.google.com/document/d/{id}/export"
+                     "?format={format}".format(
+                         id=gdrive_file_id,
+                         format="docx" if format is None else format,
+                     )
+                 )
+                 continue
+             elif m and m.groups()[0].endswith(" - Google Sheets"):
+                 url = (
+                     "https://docs.google.com/spreadsheets/d/{id}/export"
+                     "?format={format}".format(
+                         id=gdrive_file_id,
+                         format="xlsx" if format is None else format,
+                     )
+                 )
+                 continue
+             elif m and m.groups()[0].endswith(" - Google Slides"):
+                 url = (
+                     "https://docs.google.com/presentation/d/{id}/export"
+                     "?format={format}".format(
+                         id=gdrive_file_id,
+                         format="pptx" if format is None else format,
+                     )
+                 )
+                 continue
+         elif (
+             "Content-Disposition" in res.headers
+             and res.headers["Content-Disposition"].endswith("pptx")
+             and format not in {None, "pptx"}
+         ):
+             url = (
+                 "https://docs.google.com/presentation/d/{id}/export"
+                 "?format={format}".format(
+                     id=gdrive_file_id,
+                     format="pptx" if format is None else format,
+                 )
+             )
+             continue
+ 
+         if use_cookies:
+             if not osp.exists(osp.dirname(cookies_file)):
+                 os.makedirs(osp.dirname(cookies_file))
+             # Save cookies
+             with open(cookies_file, "w") as f:
+                 cookies = [
+                     (k, v)
+                     for k, v in sess.cookies.items()
+                     if not k.startswith("download_warning_")
+                 ]
+                 json.dump(cookies, f, indent=2)
+ 
+         if "Content-Disposition" in res.headers:
+             # This is the file
+             break
+         if not (gdrive_file_id and is_gdrive_download_link):
+             break
+ 
+         # Need to redirect with confirmation
+         try:
+             url = get_url_from_gdrive_confirmation(res.text)
+         except FileURLRetrievalError as e:
+             message = (
+                 "Failed to retrieve file url:\n\n{}\n\n"
+                 "You may still be able to access the file from the browser:"
+                 "\n\n\t{}\n\n"
+                 "but Gdown can't. Please check connections and permissions."
+             ).format(
+                 indent("\n".join(textwrap.wrap(str(e))), prefix="\t"),
+                 url_origin,
+             )
+             raise FileURLRetrievalError(message)
+ 
+     if gdrive_file_id and is_gdrive_download_link:
+         content_disposition = six.moves.urllib_parse.unquote(
+             res.headers["Content-Disposition"]
+         )
+ 
+         m = re.search(r"filename\*=UTF-8''(.*)", content_disposition)
+         if not m:
+             m = re.search(r'filename=["\']?(.*?)["\']?$', content_disposition)
+         filename_from_url = m.groups()[0]
+         filename_from_url = filename_from_url.replace(osp.sep, "_")
+     else:
+         filename_from_url = osp.basename(url)
+ 
+     if output is None:
+         output = filename_from_url
+ 
+     output_is_path = isinstance(output, six.string_types)
+     if output_is_path and output.endswith(osp.sep):
+         if not osp.exists(output):
+             os.makedirs(output)
+         output = osp.join(output, filename_from_url)
+ 
+     if output_is_path:
+         existing_tmp_files = []
+         for file in os.listdir(osp.dirname(output) or "."):
+             if file.startswith(osp.basename(output)):
+                 existing_tmp_files.append(osp.join(osp.dirname(output), file))
+         if resume and existing_tmp_files:
+             if len(existing_tmp_files) != 1:
+                 print(
+                     "There are multiple temporary files to resume:",
+                     file=sys.stderr,
+                 )
+                 print("\n")
+                 for file in existing_tmp_files:
+                     print("\t", file, file=sys.stderr)
+                 print("\n")
+                 print(
+                     "Please remove them except one to resume downloading.",
+                     file=sys.stderr,
+                 )
+                 return
+             tmp_file = existing_tmp_files[0]
+         else:
+             resume = False
+             # mkstemp is preferred, but does not work on Windows
+             # https://github.com/wkentaro/gdown/issues/153
+             tmp_file = tempfile.mktemp(
+                 suffix=tempfile.template,
+                 prefix=osp.basename(output),
+                 dir=osp.dirname(output),
+             )
+         f = open(tmp_file, "ab")
+     else:
+         tmp_file = None
+         f = output
+ 
+     if tmp_file is not None and f.tell() != 0:
+         headers = {"Range": "bytes={}-".format(f.tell())}
+         res = sess.get(url, headers=headers, stream=True, verify=verify)
+ 
+     if not quiet:
+         # print("Downloading...", file=sys.stderr)
+         if resume:
+             print("Resume:", tmp_file, file=sys.stderr)
+         # if url_origin != url:
+         #     print("From (original):", url_origin, file=sys.stderr)
+         #     print("From (redirected):", url, file=sys.stderr)
+         # else:
+         #     print("From:", url, file=sys.stderr)
+         print(
+             "To:",
+             osp.abspath(output) if output_is_path else output,
+             file=sys.stderr,
+         )
+ 
+     try:
+         total = res.headers.get("Content-Length")
+         if total is not None:
+             total = int(total)
+         if not quiet:
+             pbar = tqdm.tqdm(total=total, unit="B", unit_scale=True)
+         t_start = time.time()
+         for chunk in res.iter_content(chunk_size=CHUNK_SIZE):
+             f.write(chunk)
+             if not quiet:
+                 pbar.update(len(chunk))
+             if speed is not None:
+                 elapsed_time_expected = 1.0 * pbar.n / speed
+                 elapsed_time = time.time() - t_start
+                 if elapsed_time < elapsed_time_expected:
+                     time.sleep(elapsed_time_expected - elapsed_time)
+         if not quiet:
+             pbar.close()
+         if tmp_file:
+             f.close()
+             shutil.move(tmp_file, output)
+     finally:
+         sess.close()
+ 
+     return output
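
For reference, a minimal usage sketch of this vendored module from Python. The keyword arguments match the `download()` signature above; the file ID and output name are placeholders, not real values:

    import rvc.lib.tools.gdown as gdown

    # Hypothetical Drive file ID, for illustration only.
    output = gdown.download(id="FILE_ID_HERE", output="model.zip", quiet=False, fuzzy=True)
    print("Saved to:", output)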
rvc/lib/tools/launch_tensorboard.py ADDED
@@ -0,0 +1,15 @@
+ import time
+ from tensorboard import program
+ 
+ log_path = "logs"
+ 
+ if __name__ == "__main__":
+     tb = program.TensorBoard()
+     tb.configure(argv=[None, "--logdir", log_path])
+     url = tb.launch()
+     print(
+         f"Access the tensorboard using the following link:\n{url}?pinnedCards=%5B%7B%22plugin%22%3A%22scalars%22%2C%22tag%22%3A%22loss%2Fg%2Ftotal%22%7D%2C%7B%22plugin%22%3A%22scalars%22%2C%22tag%22%3A%22loss%2Fd%2Ftotal%22%7D%2C%7B%22plugin%22%3A%22scalars%22%2C%22tag%22%3A%22loss%2Fg%2Fkl%22%7D%2C%7B%22plugin%22%3A%22scalars%22%2C%22tag%22%3A%22loss%2Fg%2Fmel%22%7D%5D"
+     )
+ 
+     while True:
+         time.sleep(600)
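
The script blocks forever after launching TensorBoard, so it is meant to run as its own process. A typical invocation, assuming the repository root as the working directory so the relative `logs` path resolves:

    python rvc/lib/tools/launch_tensorboard.py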
rvc/lib/tools/model_download.py ADDED
@@ -0,0 +1,225 @@
+ import os
+ import re
+ import sys
+ import zipfile
+ from urllib.parse import unquote
+ 
+ import requests
+ import wget
+ from bs4 import BeautifulSoup
+ 
+ 
+ def find_folder_parent(search_dir, folder_name):
+     for dirpath, dirnames, _ in os.walk(search_dir):
+         if folder_name in dirnames:
+             return os.path.abspath(dirpath)
+     return None
+ 
+ 
+ now_dir = os.getcwd()
+ sys.path.append(now_dir)
+ 
+ import rvc.lib.tools.gdown as gdown
+ 
+ file_path = find_folder_parent(now_dir, "logs")
+ 
+ zips_path = os.getcwd() + "/logs/zips"
+ 
+ 
+ def search_pth_index(folder):
+     pth_paths = [
+         os.path.join(folder, file)
+         for file in os.listdir(folder)
+         if os.path.isfile(os.path.join(folder, file)) and file.endswith(".pth")
+     ]
+     index_paths = [
+         os.path.join(folder, file)
+         for file in os.listdir(folder)
+         if os.path.isfile(os.path.join(folder, file)) and file.endswith(".index")
+     ]
+ 
+     return pth_paths, index_paths
+ 
+ 
+ def get_mediafire_download_link(url):
+     response = requests.get(url)
+     response.raise_for_status()
+     soup = BeautifulSoup(response.text, "html.parser")
+     download_button = soup.find(
+         "a", {"class": "input popsok", "aria-label": "Download file"}
+     )
+     if download_button:
+         download_link = download_button.get("href")
+         return download_link
+     else:
+         return None
+ 
+ 
+ def download_from_url(url):
+     os.makedirs(zips_path, exist_ok=True)
+     if url != "":
+         if "drive.google.com" in url:
+             if "file/d/" in url:
+                 file_id = url.split("file/d/")[1].split("/")[0]
+             elif "id=" in url:
+                 file_id = url.split("id=")[1].split("&")[0]
+             else:
+                 return None
+ 
+             if file_id:
+                 os.chdir(zips_path)
+                 try:
+                     gdown.download(
+                         f"https://drive.google.com/uc?id={file_id}",
+                         quiet=False,
+                         fuzzy=True,
+                     )
+                 except Exception as error:
+                     error_message = str(error)
+                     if (
+                         "Too many users have viewed or downloaded this file recently"
+                         in error_message
+                     ):
+                         os.chdir(now_dir)
+                         return "too much use"
+                     elif (
+                         "Cannot retrieve the public link of the file." in error_message
+                     ):
+                         os.chdir(now_dir)
+                         return "private link"
+                     else:
+                         print(error_message)
+                         os.chdir(now_dir)
+                         return None
+ 
+         elif "/blob/" in url or "/resolve/" in url:
+             os.chdir(zips_path)
+             if "/blob/" in url:
+                 url = url.replace("/blob/", "/resolve/")
+ 
+             response = requests.get(url, stream=True)
+             if response.status_code == 200:
+                 file_name = url.split("/")[-1]
+                 file_name = unquote(file_name)
+ 
+                 file_name = re.sub(r"[^a-zA-Z0-9_.-]", "_", file_name)
+ 
+                 total_size_in_bytes = int(response.headers.get("content-length", 0))
+                 block_size = 1024
+                 progress_bar_length = 50
+                 progress = 0
+ 
+                 with open(os.path.join(zips_path, file_name), "wb") as file:
+                     for data in response.iter_content(block_size):
+                         file.write(data)
+                         progress += len(data)
+                         progress_percent = int((progress / total_size_in_bytes) * 100)
+                         num_dots = int(
+                             (progress / total_size_in_bytes) * progress_bar_length
+                         )
+                         progress_bar = (
+                             "["
+                             + "." * num_dots
+                             + " " * (progress_bar_length - num_dots)
+                             + "]"
+                         )
+                         print(
+                             f"{progress_percent}% {progress_bar} {progress}/{total_size_in_bytes}  ",
+                             end="\r",
+                         )
+                         if progress_percent == 100:
+                             print("\n")
+ 
+             else:
+                 os.chdir(now_dir)
+                 return None
+         elif "/tree/main" in url:
+             os.chdir(zips_path)
+             response = requests.get(url)
+             soup = BeautifulSoup(response.content, "html.parser")
+             temp_url = ""
+             for link in soup.find_all("a", href=True):
+                 if link["href"].endswith(".zip"):
+                     temp_url = link["href"]
+                     break
+             if temp_url:
+                 url = temp_url
+                 url = url.replace("blob", "resolve")
+                 if "huggingface.co" not in url:
+                     url = "https://huggingface.co" + url
+ 
+                 wget.download(url)
+             else:
+                 os.chdir(now_dir)
+                 return None
+         else:
+             try:
+                 os.chdir(zips_path)
+                 wget.download(url)
+             except Exception as error:
+                 os.chdir(now_dir)
+                 print(error)
+                 return None
+ 
+         # Normalize downloaded filenames: replace inner dots with underscores,
+         # keeping only the real extension.
+         for currentPath, _, zipFiles in os.walk(zips_path):
+             for Files in zipFiles:
+                 filePart = Files.split(".")
+                 extensionFile = filePart[-1]
+                 filePart.pop()
+                 nameFile = "_".join(filePart)
+                 realPath = os.path.join(currentPath, Files)
+                 os.rename(realPath, nameFile + "." + extensionFile)
+ 
+         os.chdir(now_dir)
+         return "downloaded"
+ 
+     os.chdir(now_dir)
+     return None
+ 
+ 
+ def extract_and_show_progress(zipfile_path, unzips_path):
+     try:
+         with zipfile.ZipFile(zipfile_path, "r") as zip_ref:
+             for file_info in zip_ref.infolist():
+                 zip_ref.extract(file_info, unzips_path)
+         os.remove(zipfile_path)
+         return True
+     except Exception as error:
+         print(error)
+         return False
+ 
+ 
+ def unzip_file(zip_path, zip_file_name):
+     zip_file_path = os.path.join(zip_path, zip_file_name + ".zip")
+     extract_path = os.path.join(file_path, zip_file_name)
+     with zipfile.ZipFile(zip_file_path, "r") as zip_ref:
+         zip_ref.extractall(extract_path)
+     os.remove(zip_file_path)
+ 
+ 
+ url = sys.argv[1]
+ verify = download_from_url(url)
+ 
+ if verify == "downloaded":
+     extract_folder_path = ""
+     for filename in os.listdir(zips_path):
+         if filename.endswith(".zip"):
+             zipfile_path = os.path.join(zips_path, filename)
+             print("Proceeding with the extraction...")
+ 
+             model_name = os.path.basename(zipfile_path)
+             extract_folder_path = os.path.join(
+                 "logs",
+                 os.path.normpath(str(model_name).replace(".zip", "")),
+             )
+ 
+             success = extract_and_show_progress(zipfile_path, extract_folder_path)
+             if success:
+                 print(f"Model {model_name} downloaded!")
+             else:
+                 print(f"Error extracting {model_name}")
+                 sys.exit()
+     if extract_folder_path == "":
+         print("No zip file found...")
+         sys.exit()
+     result = search_pth_index(extract_folder_path)
+ else:
+     message = "Error"
+     sys.exit()
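
The module does its work at import time and reads the model URL from `sys.argv[1]`, so it is invoked as a script. A sketch of the expected call, with a placeholder URL:

    python rvc/lib/tools/model_download.py "https://huggingface.co/user/model/resolve/main/model.zip"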
rvc/lib/tools/prerequisites_download.py ADDED
@@ -0,0 +1,84 @@
+ import os
+ import sys
+ 
+ import wget
+ 
+ url_base = "https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main"
+ models_download = [
+     (
+         "pretrained/",
+         [
+             "D32k.pth",
+             "D40k.pth",
+             "D48k.pth",
+             "G32k.pth",
+             "G40k.pth",
+             "G48k.pth",
+             "f0D32k.pth",
+             "f0D40k.pth",
+             "f0D48k.pth",
+             "f0G32k.pth",
+             "f0G40k.pth",
+             "f0G48k.pth",
+         ],
+     ),
+     (
+         "pretrained_v2/",
+         [
+             "D32k.pth",
+             "D40k.pth",
+             "D48k.pth",
+             "G32k.pth",
+             "G40k.pth",
+             "G48k.pth",
+             "f0D32k.pth",
+             "f0D40k.pth",
+             "f0D48k.pth",
+             "f0G32k.pth",
+             "f0G40k.pth",
+             "f0G48k.pth",
+         ],
+     ),
+ ]
+ 
+ models_file = [
+     "hubert_base.pt",
+     "rmvpe.pt",
+     # "rmvpe.onnx",
+ ]
+ 
+ executables_file = [
+     "ffmpeg.exe",
+     "ffprobe.exe",
+ ]
+ 
+ folder_mapping = {
+     "pretrained/": "rvc/pretraineds/pretrained_v1/",
+     "pretrained_v2/": "rvc/pretraineds/pretrained_v2/",
+ }
+ 
+ for file_name in models_file:
+     destination_path = os.path.join(file_name)
+     url = f"{url_base}/{file_name}"
+     if not os.path.exists(destination_path):
+         os.makedirs(os.path.dirname(destination_path) or ".", exist_ok=True)
+         print(f"\nDownloading {url} to {destination_path}...")
+         wget.download(url, out=destination_path)
+ 
+ for file_name in executables_file:
+     if sys.platform == "win32":
+         destination_path = os.path.join(file_name)
+         url = f"{url_base}/{file_name}"
+         if not os.path.exists(destination_path):
+             os.makedirs(os.path.dirname(destination_path) or ".", exist_ok=True)
+             print(f"\nDownloading {url} to {destination_path}...")
+             wget.download(url, out=destination_path)
+ 
+ for remote_folder, file_list in models_download:
+     local_folder = folder_mapping.get(remote_folder, "")
+     for file in file_list:
+         destination_path = os.path.join(local_folder, file)
+         url = f"{url_base}/{remote_folder}{file}"
+         if not os.path.exists(destination_path):
+             os.makedirs(os.path.dirname(destination_path) or ".", exist_ok=True)
+             print(f"\nDownloading {url} to {destination_path}...")
+             wget.download(url, out=destination_path)
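
The script is effectively idempotent: every `wget.download` call is guarded by an `os.path.exists` check, so re-running it only fetches whatever is still missing. Run it once from the repository root so the relative `rvc/pretraineds/...` destinations resolve:

    python rvc/lib/tools/prerequisites_download.py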
rvc/lib/tools/pretrained_selector.py ADDED
@@ -0,0 +1,63 @@
+ def pretrained_selector(pitch_guidance):
+     if pitch_guidance:
+         return {
+             "v1": {
+                 "32000": (
+                     "rvc/pretraineds/pretrained_v1/f0G32k.pth",
+                     "rvc/pretraineds/pretrained_v1/f0D32k.pth",
+                 ),
+                 "40000": (
+                     "rvc/pretraineds/pretrained_v1/f0G40k.pth",
+                     "rvc/pretraineds/pretrained_v1/f0D40k.pth",
+                 ),
+                 "48000": (
+                     "rvc/pretraineds/pretrained_v1/f0G48k.pth",
+                     "rvc/pretraineds/pretrained_v1/f0D48k.pth",
+                 ),
+             },
+             "v2": {
+                 "32000": (
+                     "rvc/pretraineds/pretrained_v2/f0G32k.pth",
+                     "rvc/pretraineds/pretrained_v2/f0D32k.pth",
+                 ),
+                 "40000": (
+                     "rvc/pretraineds/pretrained_v2/f0G40k.pth",
+                     "rvc/pretraineds/pretrained_v2/f0D40k.pth",
+                 ),
+                 "48000": (
+                     "rvc/pretraineds/pretrained_v2/f0G48k.pth",
+                     "rvc/pretraineds/pretrained_v2/f0D48k.pth",
+                 ),
+             },
+         }
+     else:
+         return {
+             "v1": {
+                 "32000": (
+                     "rvc/pretraineds/pretrained_v1/G32k.pth",
+                     "rvc/pretraineds/pretrained_v1/D32k.pth",
+                 ),
+                 "40000": (
+                     "rvc/pretraineds/pretrained_v1/G40k.pth",
+                     "rvc/pretraineds/pretrained_v1/D40k.pth",
+                 ),
+                 "48000": (
+                     "rvc/pretraineds/pretrained_v1/G48k.pth",
+                     "rvc/pretraineds/pretrained_v1/D48k.pth",
+                 ),
+             },
+             "v2": {
+                 "32000": (
+                     "rvc/pretraineds/pretrained_v2/G32k.pth",
+                     "rvc/pretraineds/pretrained_v2/D32k.pth",
+                 ),
+                 "40000": (
+                     "rvc/pretraineds/pretrained_v2/G40k.pth",
+                     "rvc/pretraineds/pretrained_v2/D40k.pth",
+                 ),
+                 "48000": (
+                     "rvc/pretraineds/pretrained_v2/G48k.pth",
+                     "rvc/pretraineds/pretrained_v2/D48k.pth",
+                 ),
+             },
+         }
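
A small usage sketch: the selector returns a nested dict keyed by RVC version and sampling rate (both strings), and callers index into it to get the (generator, discriminator) checkpoint pair:

    from rvc.lib.tools.pretrained_selector import pretrained_selector

    # Pick the v2, 40 kHz pretrained pair with pitch guidance enabled.
    pg, pd = pretrained_selector(pitch_guidance=True)["v2"]["40000"]
    print(pg)  # rvc/pretraineds/pretrained_v2/f0G40k.pth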
rvc/lib/tools/split_audio.py ADDED
@@ -0,0 +1,113 @@
+ import os
+ import re
+ import unicodedata
+ 
+ import numpy as np
+ from pydub import AudioSegment
+ from pydub.silence import detect_nonsilent
+ 
+ 
+ def format_title(title):
+     # Chain the substitutions so each step operates on the previous result.
+     formatted_title = (
+         unicodedata.normalize("NFKD", title).encode("ascii", "ignore").decode("utf-8")
+     )
+     formatted_title = re.sub(r"[\u2500-\u257F]+", "", formatted_title)
+     formatted_title = re.sub(r"[^\w\s-]", "", formatted_title)
+     formatted_title = re.sub(r"\s+", "_", formatted_title)
+     return formatted_title
+ 
+ 
+ def process_audio(file_path):
+     try:
+         # load audio file
+         song = AudioSegment.from_file(file_path)
+ 
+         print("Ignore any warnings you may see above...")
+ 
+         # set silence threshold and duration
+         silence_thresh = -70  # dB
+         min_silence_len = 750  # ms, adjust as needed
+ 
+         # detect nonsilent parts
+         nonsilent_parts = detect_nonsilent(
+             song, min_silence_len=min_silence_len, silence_thresh=silence_thresh
+         )
+ 
+         # Create a new directory to store chunks
+         file_dir = os.path.dirname(file_path)
+         file_name = os.path.basename(file_path).split(".")[0]
+         file_name = format_title(file_name)
+         new_dir_path = os.path.join(file_dir, file_name)
+         os.makedirs(new_dir_path, exist_ok=True)
+ 
+         # Delete any existing timestamps file so repeated runs don't append to stale data
+         timestamps_file = os.path.join(file_dir, f"{file_name}_timestamps.txt")
+         if os.path.isfile(timestamps_file):
+             os.remove(timestamps_file)
+ 
+         # export chunks and save start times
+         segment_count = 0
+         for i, (start_i, end_i) in enumerate(nonsilent_parts):
+             chunk = song[start_i:end_i]
+             chunk_file_path = os.path.join(new_dir_path, f"chunk{i}.wav")
+             chunk.export(chunk_file_path, format="wav")
+ 
+             print(f"Segment {i} created!")
+             segment_count += 1
+ 
+             # write start times to file
+             with open(timestamps_file, "a", encoding="utf-8") as f:
+                 f.write(f"{chunk_file_path} starts at {start_i} ms\n")
+ 
+         print(f"Total segments created: {segment_count}")
+         print(f"Split all chunks for {file_path} successfully!")
+ 
+         return "Finish", new_dir_path
+ 
+     except Exception as e:
+         print(f"An error occurred: {e}")
+         return "Error", None
+ 
+ 
+ def merge_audio(timestamps_file):
+     try:
+         # Extract prefix from the timestamps filename
+         prefix = os.path.basename(timestamps_file).replace("_timestamps.txt", "")
+         timestamps_dir = os.path.dirname(timestamps_file)
+ 
+         # Open the timestamps file
+         with open(timestamps_file, "r", encoding="utf-8") as f:
+             lines = f.readlines()
+ 
+         # Initialize empty list to hold audio segments
+         audio_segments = []
+         last_end_time = 0
+ 
+         print(f"Processing file: {timestamps_file}")
+ 
+         for line in lines:
+             # Extract filename and start time from line
+             match = re.search(r"(chunk\d+\.wav) starts at (\d+) ms", line)
+             if match:
+                 filename, start_time = match.groups()
+                 start_time = int(start_time)
+ 
+                 # Construct the complete path to the chunk file
+                 chunk_file = os.path.join(timestamps_dir, prefix, filename)
+ 
+                 # Add silence from last_end_time to start_time
+                 silence_duration = max(start_time - last_end_time, 0)
+                 silence = AudioSegment.silent(duration=silence_duration)
+                 audio_segments.append(silence)
+ 
+                 # Load audio file and append to list
+                 audio = AudioSegment.from_wav(chunk_file)
+                 audio_segments.append(audio)
+ 
+                 # Update last_end_time
+                 last_end_time = start_time + len(audio)
+ 
+                 print(f"Processed chunk: {chunk_file}")
+ 
+         # Concatenate all audio_segments and return the raw samples
+         merged_audio = sum(audio_segments)
+         merged_audio_np = np.array(merged_audio.get_array_of_samples())
+         return merged_audio.frame_rate, merged_audio_np
+ 
+     except Exception as e:
+         print(f"An error occurred: {e}")
rvc/lib/tools/tts.py ADDED
@@ -0,0 +1,16 @@
+ import sys
+ import asyncio
+ import edge_tts
+ 
+ 
+ async def main():
+     text = sys.argv[1]
+     voice = sys.argv[2]
+     output_file = sys.argv[3]
+ 
+     await edge_tts.Communicate(text, voice).save(output_file)
+     print(f"TTS with {voice} completed. Output TTS file: '{output_file}'")
+ 
+ 
+ if __name__ == "__main__":
+     asyncio.run(main())
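
The script takes the text, an edge-tts voice name, and the output path as positional arguments. A sketch of a call (the voice name is one example from edge-tts's catalogue, also listed in `tts_voices.json`):

    python rvc/lib/tools/tts.py "Hello world" en-US-AriaNeural output.mp3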
rvc/lib/tools/tts_voices.json ADDED
The diff for this file is too large to render. See raw diff
 
rvc/lib/tools/validators.py ADDED
@@ -0,0 +1,67 @@
+ import argparse
+ import os
+ import json
+ 
+ 
+ def validate_sampling_rate(value):
+     valid_sampling = [
+         "32000",
+         "40000",
+         "48000",
+     ]
+     if value in valid_sampling:
+         return value
+     else:
+         raise argparse.ArgumentTypeError(
+             f"Invalid sampling_rate. Please choose from {valid_sampling} not {value}"
+         )
+ 
+ 
+ def validate_f0up_key(value):
+     f0up_key = int(value)
+     if -12 <= f0up_key <= 12:
+         return f0up_key
+     else:
+         raise argparse.ArgumentTypeError("f0up_key must be in the range of -12 to +12")
+ 
+ 
+ def validate_true_false(value):
+     valid_tf = [
+         "True",
+         "False",
+     ]
+     if value in valid_tf:
+         return value
+     else:
+         raise argparse.ArgumentTypeError(
+             f"Invalid true_false. Please choose from {valid_tf} not {value}"
+         )
+ 
+ 
+ def validate_f0method(value):
+     valid_f0methods = [
+         "pm",
+         "dio",
+         "crepe",
+         "crepe-tiny",
+         "harvest",
+         "rmvpe",
+     ]
+     if value in valid_f0methods:
+         return value
+     else:
+         raise argparse.ArgumentTypeError(
+             f"Invalid f0method. Please choose from {valid_f0methods} not {value}"
+         )
+ 
+ 
+ def validate_tts_voices(value):
+     json_path = os.path.join("rvc", "lib", "tools", "tts_voices.json")
+     with open(json_path, "r") as file:
+         tts_voices_data = json.load(file)
+ 
+     # Extract the "ShortName" values from the JSON
+     short_names = [voice.get("ShortName", "") for voice in tts_voices_data]
+     if value in short_names:
+         return value
+     else:
+         raise argparse.ArgumentTypeError(
+             f"Invalid voice. Please choose from {short_names} not {value}"
+         )
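
These validators are designed to plug into argparse via the `type=` hook, so invalid values are rejected at parse time with an `ArgumentTypeError`. A minimal wiring sketch (the argument names here are illustrative, not the project's actual CLI):

    import argparse
    from rvc.lib.tools.validators import validate_sampling_rate, validate_f0method

    parser = argparse.ArgumentParser()
    parser.add_argument("--sampling_rate", type=validate_sampling_rate, default="40000")
    parser.add_argument("--f0method", type=validate_f0method, default="rmvpe")
    args = parser.parse_args(["--f0method", "rmvpe"])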