Vladimir Alabov committed
Commit 30e6a40
1 Parent(s): ff5d419
Files changed (1):
  1. app.py +78 -4

app.py CHANGED
@@ -1,9 +1,83 @@
  import os
  import io
  import gradio as gr
+ import librosa
+ import numpy as np
+ import logging
+ import soundfile
+ import asyncio
+ import argparse
+ import gradio.processing_utils as gr_processing_utils
+ logging.getLogger('numba').setLevel(logging.WARNING)
+ logging.getLogger('markdown_it').setLevel(logging.WARNING)
+ logging.getLogger('urllib3').setLevel(logging.WARNING)
+ logging.getLogger('matplotlib').setLevel(logging.WARNING)
 
+ limitation = os.getenv("SYSTEM") == "spaces"  # limit audio length in huggingface spaces
- def greet(name):
-     return "Hello " + name + "!!"
 
- iface = gr.Interface(fn=greet, inputs="text", outputs="text")
- iface.launch()
+ audio_postprocess_ori = gr.Audio.postprocess
+
+ def audio_postprocess(self, y):
+     data = audio_postprocess_ori(self, y)
+     if data is None:
+         return None
+     return gr_processing_utils.encode_url_or_file_to_base64(data["name"])
+
+ gr.Audio.postprocess = audio_postprocess
+
+ def create_vc_fn(model, sid):
+     def vc_fn(input_audio, vc_transform, auto_f0):
+         if input_audio is None:
+             return "You need to upload an audio", None
+         sampling_rate, audio = input_audio
+         duration = audio.shape[0] / sampling_rate
+         if duration > 20 and limitation:
+             return "Please upload an audio file that is less than 20 seconds. If you need to generate a longer audio file, please use Colab.", None
+         audio = (audio / np.iinfo(audio.dtype).max).astype(np.float32)
+         if len(audio.shape) > 1:
+             audio = librosa.to_mono(audio.transpose(1, 0))
+         if sampling_rate != 16000:
+             audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000)
+         raw_path = io.BytesIO()
+         soundfile.write(raw_path, audio, 16000, format="wav")
+         raw_path.seek(0)
+         out_audio, out_sr = model.infer(sid, vc_transform, raw_path,
+                                         auto_predict_f0=auto_f0,
+                                         )
+         return "Success", (44100, out_audio.cpu().numpy())
+     return vc_fn
+
+ if __name__ == '__main__':
+     parser = argparse.ArgumentParser()
+     parser.add_argument('--device', type=str, default='cpu')
+     parser.add_argument('--api', action="store_true", default=False)
+     parser.add_argument("--share", action="store_true", default=False, help="share gradio app")
+     args = parser.parse_args()
+     models = []
+     voices = []
+     for f in os.listdir("models"):
+         name = f
+         # = Svc(fr"models/{f}/{f}.pth", f"models/{f}/config.json", device=args.device)
+         #cover = f"models/{f}/cover.png" if os.path.exists(f"models/{f}/cover.png") else None
+         #models.append((name, cover, create_vc_fn(model, name)))
+
+     # !svc infer {NAME}.wav -c config.json -m G_riri_220.pth
+     # display(Audio(f"{NAME}.out.wav", autoplay=True))
+     with gr.Blocks() as app:
+         gr.Markdown(
+             "# <center> Sovits Chapay\n"
+             "## <center> The input audio should be clean and pure voice without background music.\n"
+         )
+         with gr.Tabs():
+             for (name, cover, vc_fn) in models:
+                 with gr.TabItem(name):
+                     with gr.Row():
+                         with gr.Column():
+                             vc_input = gr.Audio(label="Input audio"+' (less than 20 seconds)' if limitation else '')
+                             vc_transform = gr.Number(label="vc_transform", value=0)
+                             auto_f0 = gr.Checkbox(label="auto_f0", value=False)
+                             vc_submit = gr.Button("Generate", variant="primary")
+                         with gr.Column():
+                             vc_output1 = gr.Textbox(label="Output Message")
+                             vc_output2 = gr.Audio(label="Output Audio")
+                     vc_submit.click(vc_fn, [vc_input, vc_transform, auto_f0], [vc_output1, vc_output2])
+         app.queue(concurrency_count=1, api_open=args.api).launch(share=args.share)
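
For reference, the preprocessing that vc_fn performs before calling model.infer can be exercised in isolation. The sketch below is a minimal, self-contained approximation of that path; the helper name preprocess_for_inference and the synthetic test tone are illustrative and not part of the commit, while the normalization, mono downmix, and 16 kHz resample mirror the function above.

import io
import numpy as np
import librosa
import soundfile

def preprocess_for_inference(sampling_rate, audio):
    # Integer PCM -> float32 in [-1, 1], matching the normalization in vc_fn.
    audio = (audio / np.iinfo(audio.dtype).max).astype(np.float32)
    # Downmix (n_samples, channels) stereo to mono.
    if audio.ndim > 1:
        audio = librosa.to_mono(audio.transpose(1, 0))
    # Resample to the 16 kHz rate the model expects.
    if sampling_rate != 16000:
        audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000)
    # Serialize to an in-memory WAV, as vc_fn does before handing audio to model.infer.
    raw_path = io.BytesIO()
    soundfile.write(raw_path, audio, 16000, format="wav")
    raw_path.seek(0)
    return raw_path

# Illustrative usage with a synthetic one-second 440 Hz tone at 44.1 kHz.
sr = 44100
tone = (np.sin(2 * np.pi * 440 * np.arange(sr) / sr) * 32767).astype(np.int16)
wav_buffer = preprocess_for_inference(sr, tone)

Writing into an io.BytesIO buffer rather than a temporary file keeps the whole conversion in memory, which is the same design choice the commit makes for passing audio to model.infer on the Space.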