kasper-boy committed de2d875 (1 parent: 0e409f1): Update app.py

app.py CHANGED
@@ -18,33 +18,33 @@ def get_wavlm():
 
     device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
-    print("Loading FreeVC...")
-    hps = utils.get_hparams_from_file("configs/freevc.json")
+    print("Loading CloneVoiceAI...")
+    hps = utils.get_hparams_from_file("configs/CloneVoiceAI.json")
     freevc = SynthesizerTrn(
         hps.data.filter_length // 2 + 1,
         hps.train.segment_size // hps.data.hop_length,
         **hps.model).to(device)
     _ = freevc.eval()
-    _ = utils.load_checkpoint("checkpoints/freevc.pth", freevc, None)
+    _ = utils.load_checkpoint("checkpoints/CloneVoiceAI.pth", freevc, None)
     smodel = SpeakerEncoder('speaker_encoder/ckpt/pretrained_bak_5805000.pt')
 
-    print("Loading FreeVC(24k)...")
-    hps = utils.get_hparams_from_file("configs/freevc-24.json")
+    print("Loading CloneVoiceAI(24k)...")
+    hps = utils.get_hparams_from_file("configs/CloneVoiceAI-24.json")
     freevc_24 = SynthesizerTrn(
         hps.data.filter_length // 2 + 1,
         hps.train.segment_size // hps.data.hop_length,
         **hps.model).to(device)
     _ = freevc_24.eval()
-    _ = utils.load_checkpoint("checkpoints/freevc-24.pth", freevc_24, None)
+    _ = utils.load_checkpoint("checkpoints/CloneVoiceAI-24.pth", freevc_24, None)
 
-    print("Loading FreeVC-s...")
-    hps = utils.get_hparams_from_file("configs/freevc-s.json")
+    print("Loading CloneVoiceAI-s...")
+    hps = utils.get_hparams_from_file("configs/CloneVoiceAI-s.json")
     freevc_s = SynthesizerTrn(
         hps.data.filter_length // 2 + 1,
         hps.train.segment_size // hps.data.hop_length,
         **hps.model).to(device)
     _ = freevc_s.eval()
-    _ = utils.load_checkpoint("checkpoints/freevc-s.pth", freevc_s, None)
+    _ = utils.load_checkpoint("checkpoints/CloneVoiceAI-s.pth", freevc_s, None)
 
     print("Loading WavLM for content...")
     cmodel = WavLMModel.from_pretrained("microsoft/wavlm-large").to(device)
@@ -54,7 +54,7 @@ def convert(model, src, tgt):
     # tgt
     wav_tgt, _ = librosa.load(tgt, sr=hps.data.sampling_rate)
     wav_tgt, _ = librosa.effects.trim(wav_tgt, top_db=20)
-    if model == "FreeVC" or model == "FreeVC (24kHz)":
+    if model == "CloneVoiceAI" or model == "CloneVoiceAI (24kHz)":
         g_tgt = smodel.embed_utterance(wav_tgt)
         g_tgt = torch.from_numpy(g_tgt).unsqueeze(0).to(device)
     else:
@@ -74,30 +74,30 @@ def convert(model, src, tgt):
     wav_src = torch.from_numpy(wav_src).unsqueeze(0).to(device)
     c = cmodel(wav_src).last_hidden_state.transpose(1, 2).to(device)
     # infer
-    if model == "FreeVC":
+    if model == "CloneVoiceAI":
         audio = freevc.infer(c, g=g_tgt)
-    elif model == "FreeVC-s":
+    elif model == "CloneVoiceAI-s":
         audio = freevc_s.infer(c, mel=mel_tgt)
     else:
         audio = freevc_24.infer(c, g=g_tgt)
     audio = audio[0][0].data.cpu().float().numpy()
-    if model == "FreeVC" or model == "FreeVC-s":
+    if model == "CloneVoiceAI" or model == "CloneVoiceAI-s":
         write("out.wav", hps.data.sampling_rate, audio)
     else:
         write("out.wav", 24000, audio)
     out = "out.wav"
     return out
 
-model = gr.Dropdown(choices=["FreeVC", "FreeVC-s", "FreeVC (24kHz)"], value="FreeVC",type="value", label="Model")
+model = gr.Dropdown(choices=["CloneVoiceAI", "CloneVoiceAI-s", "CloneVoiceAI (24kHz)"], value="CloneVoiceAI",type="value", label="Model")
 audio1 = gr.Audio(label="Source Audio", type='filepath')
 audio2 = gr.Audio(label="Reference Audio", type='filepath')
 inputs = [model, audio1, audio2]
 outputs = gr.Audio(label="Output Audio", type='filepath')
 
-title = "FreeVC"
+title = "CloneVoiceAI"
-description = "Gradio Demo for FreeVC: Towards High-Quality Text-Free One-Shot Voice Conversion. To use it, simply upload your audio, or click the example to load. Read more at the links below. Note: It seems that the WavLM checkpoint in HuggingFace is a little different from the one used to train FreeVC, which may degrade the performance a bit. In addition, speaker similarity can be largely affected if there are too much silence in the reference audio, so please <strong>trim</strong> it before submitting."
+description = "Gradio Demo for CloneVoiceAI: Towards High-Quality Text-Free One-Shot Voice Conversion. To use it, simply upload your audio, or click the example to load. Read more at the links below. Note: It seems that the WavLM checkpoint in HuggingFace is a little different from the one used to train FreeVC, which may degrade the performance a bit. In addition, speaker similarity can be largely affected if there are too much silence in the reference audio, so please <strong>trim</strong> it before submitting."
-article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2210.15418' target='_blank'>Paper</a> | <a href='https://github.com/OlaWod/FreeVC' target='_blank'>Github Repo</a></p>"
+#article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2210.15418' target='_blank'>Paper</a> | <a href='https://github.com/OlaWod/FreeVC' target='_blank'>Github Repo</a></p>"
 
-examples=[["FreeVC", 'p225_001.wav', 'p226_002.wav'], ["FreeVC-s", 'p226_002.wav', 'p225_001.wav'], ["FreeVC (24kHz)", 'p225_001.wav', 'p226_002.wav']]
+examples=[["CloneVoiceAI", 'p225_001.wav', 'p226_002.wav'], ["CloneVoiceAI-s", 'p226_002.wav', 'p225_001.wav'], ["CloneVoiceAI (24kHz)", 'p225_001.wav', 'p226_002.wav']]
 
 gr.Interface(convert, inputs, outputs, title=title, description=description, article=article, examples=examples).launch()
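Review note: new line 99 comments out the `article` assignment, but the unchanged line 103 still passes `article=article` to `gr.Interface`, so the app should fail at startup with `NameError: name 'article' is not defined` unless `article` is set somewhere outside this diff. A minimal Python fix sketch, assuming the intent was only to drop the paper/repo links (`gr.Interface` accepts `article=None`):

    # Keep the name defined so the unchanged
    # gr.Interface(..., article=article, ...) call still resolves;
    # None simply renders no article section below the demo.
    article = None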
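For a quick sanity check after the rename, a hypothetical direct call to `convert(model, src, tgt)` using the example WAVs from the examples list (assumes they sit next to app.py, and that the models loaded without error):

    # Mirrors what the Gradio interface does when an example row is clicked.
    out = convert("CloneVoiceAI", "p225_001.wav", "p226_002.wav")  # speaker-embedding (g_tgt) path
    print(out)  # "out.wav", written at hps.data.sampling_rate (24000 for the 24kHz variant)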