import argparse
import copy
import glob
import io
import json
import logging
import os

import gradio as gr
import gradio.processing_utils as gr_processing_utils
import librosa
import numpy as np
import soundfile

logging.getLogger('numba').setLevel(logging.WARNING)
logging.getLogger('markdown_it').setLevel(logging.WARNING)
logging.getLogger('urllib3').setLevel(logging.WARNING)
logging.getLogger('matplotlib').setLevel(logging.WARNING)

limitation = os.getenv("SYSTEM") == "spaces"  # limit audio length in Hugging Face Spaces

# Root directory holding one sub-folder per custom model (G_*.pth, config.json,
# and optionally a *.pt cluster model). The directory name "models" is an
# assumption; point it at wherever your checkpoints actually live.
CUSTOM_MODELS_DIR = "models"

# Patch gr.Audio.postprocess so output audio is returned inline as a base64
# data URL instead of as a temporary file reference.
audio_postprocess_ori = gr.Audio.postprocess


def audio_postprocess(self, y):
    data = audio_postprocess_ori(self, y)
    if data is None:
        return None
    return gr_processing_utils.encode_url_or_file_to_base64(data["name"])


gr.Audio.postprocess = audio_postprocess


def create_vc_fn(model, sid):
    def vc_fn(input_audio, vc_transform, auto_f0):
        if input_audio is None:
            return "You need to upload an audio file", None
        sampling_rate, audio = input_audio
        duration = audio.shape[0] / sampling_rate
        if duration > 20 and limitation:
            return "Please upload an audio file shorter than 20 seconds. To convert longer audio, please use Colab.", None
        # Gradio delivers integer PCM; normalize to float32 in [-1, 1].
        if np.issubdtype(audio.dtype, np.integer):
            audio = (audio / np.iinfo(audio.dtype).max).astype(np.float32)
        if len(audio.shape) > 1:
            audio = librosa.to_mono(audio.transpose(1, 0))
        if sampling_rate != 16000:
            audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000)
        raw_path = io.BytesIO()
        soundfile.write(raw_path, audio, 16000, format="wav")
        raw_path.seek(0)
        out_audio, out_sr = model.infer(sid, vc_transform, raw_path,
                                        auto_predict_f0=auto_f0)
        # 44100 is the output rate this app's models produce.
        return "Success", (44100, out_audio.cpu().numpy())
    return vc_fn
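
# Hypothetical usage outside the UI, once a model is loaded (the names below
# are illustrative, not part of the original file):
#   fn = create_vc_fn(model, sid=0)
#   msg, (sr, wav) = fn((44100, int16_samples), vc_transform=0, auto_f0=False)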


def get_speakers():
    speakers = []

    # Scan only the immediate sub-folders of CUSTOM_MODELS_DIR; each one is
    # expected to hold a single model.
    for folder in next(os.walk(CUSTOM_MODELS_DIR))[1]:
        cur_speaker = {}
        # Look for G_****.pth
        g = glob.glob(os.path.join(CUSTOM_MODELS_DIR, folder, 'G_*.pth'))
        if not g:
            continue
        cur_speaker["model_path"] = g[0]
        cur_speaker["model_folder"] = folder

        # Look for *.pt (clustering model)
        clst = glob.glob(os.path.join(CUSTOM_MODELS_DIR, folder, '*.pt'))
        cur_speaker["cluster_path"] = clst[0] if clst else ""

        # Look for config.json
        cfg = glob.glob(os.path.join(CUSTOM_MODELS_DIR, folder, '*.json'))
        if not cfg:
            continue
        cur_speaker["cfg_path"] = cfg[0]
        with open(cur_speaker["cfg_path"]) as f:
            try:
                cfg_json = json.load(f)
            except Exception:
                print("Malformed config json in " + folder)
                continue

        # One entry per speaker declared in the config; skip hidden names.
        for name, i in cfg_json["spk"].items():
            cur_speaker["name"] = name
            cur_speaker["id"] = i
            if not name.startswith('.'):
                speakers.append(copy.copy(cur_speaker))

    return sorted(speakers, key=lambda x: x["name"].lower())
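

# The original file discovers checkpoints but never loads them, so nothing
# ever reaches create_vc_fn. The helper below is a minimal sketch that fills
# that gap; the `Svc` import path and constructor signature are assumptions
# based on upstream so-vits-svc and must be matched to the inference code
# actually present in your repository.
def load_model(speaker, device):
    from inference.infer_tool import Svc  # assumed import path
    kwargs = {"device": device}
    if speaker["cluster_path"]:
        kwargs["cluster_model_path"] = speaker["cluster_path"]
    return Svc(speaker["model_path"], speaker["cfg_path"], **kwargs)
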

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--device', type=str, default='cpu')
    parser.add_argument('--api', action="store_true", default=False)
    parser.add_argument("--share", action="store_true", default=False, help="share gradio app")
    args = parser.parse_args()

    speakers = get_speakers()
    speaker_list = [x["name"] for x in speakers]

    # One conversion function per speaker, keyed by the dropdown display name.
    vc_fns = {x["name"]: create_vc_fn(load_model(x, args.device), x["id"])
              for x in speakers}

    def vc_fn(speaker_name, input_audio, vc_transform, auto_f0):
        # Dispatch to the conversion function of the selected voice.
        if speaker_name not in vc_fns:
            return "Please select a voice", None
        return vc_fns[speaker_name](input_audio, vc_transform, auto_f0)

    # CLI equivalent for reference:
    #   svc infer {NAME}.wav -c config.json -m G_riri_220.pth
    with gr.Blocks() as app:
        gr.Markdown(
            "# <center> Sovits Chapay\n"
            "## <center> The input audio should be a clean, pure voice without background music.\n"
        )

        with gr.Row():
            with gr.Column():
                vc_input = gr.Audio(label="Input audio" + (" (less than 20 seconds)" if limitation else ""))
                vc_transform = gr.Number(label="vc_transform (pitch shift in semitones)", value=0)
                auto_f0 = gr.Checkbox(label="Auto predict f0", value=False)
                voice = gr.Dropdown(label="Voice", choices=speaker_list, visible=True)
                vc_submit = gr.Button("Generate", variant="primary")
            with gr.Column():
                vc_output1 = gr.Textbox(label="Output Message")
                vc_output2 = gr.Audio(label="Output Audio")
        vc_submit.click(vc_fn, [voice, vc_input, vc_transform, auto_f0], [vc_output1, vc_output2])
        app.queue(concurrency_count=1, api_open=args.api).launch(share=args.share)
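
# Example launch commands, assuming this file is saved as app.py:
#   python app.py --device cpu
#   python app.py --device cuda:0 --share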