diff --git a/NeuralSeq/LICENSE b/NeuralSeq/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..563c90c56cc4bea1f10c6307123319d92441b556 --- /dev/null +++ b/NeuralSeq/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2021 Jinglin Liu + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/NeuralSeq/README.md b/NeuralSeq/README.md new file mode 100644 index 0000000000000000000000000000000000000000..ac390032c587ed007db56faa13d6100dce7b2a76 --- /dev/null +++ b/NeuralSeq/README.md @@ -0,0 +1,9 @@ +--- +title: DiffSinger🎶 Diffusion for Singing Voice Synthesis +emoji: 🎶 +colorFrom: purple +colorTo: blue +sdk: gradio +app_file: "inference/svs/gradio/infer.py" +pinned: false +--- diff --git a/NeuralSeq/configs/config_base.yaml b/NeuralSeq/configs/config_base.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8519ba64caf62da365571b0b3edcace16e54c46c --- /dev/null +++ b/NeuralSeq/configs/config_base.yaml @@ -0,0 +1,42 @@ +# task +binary_data_dir: '' +work_dir: '' # experiment directory. +infer: false # infer +seed: 1234 +debug: false +save_codes: + - configs + - modules + - tasks + - utils + - usr + +############# +# dataset +############# +ds_workers: 1 +test_num: 100 +valid_num: 100 +endless_ds: false +sort_by_len: true + +######### +# train and eval +######### +load_ckpt: '' +save_ckpt: true +save_best: false +num_ckpt_keep: 3 +clip_grad_norm: 0 +accumulate_grad_batches: 1 +log_interval: 100 +num_sanity_val_steps: 5 # steps of validation at the beginning +check_val_every_n_epoch: 10 +val_check_interval: 2000 +max_epochs: 1000 +max_updates: 160000 +max_tokens: 31250 +max_sentences: 100000 +max_eval_tokens: -1 +max_eval_sentences: -1 +test_input_dir: '' diff --git a/NeuralSeq/configs/singing/base.yaml b/NeuralSeq/configs/singing/base.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d211a1a544d86b1ccc4698a68e0c0dd94a4eec0c --- /dev/null +++ b/NeuralSeq/configs/singing/base.yaml @@ -0,0 +1,42 @@ +base_config: + - configs/tts/base.yaml + - configs/tts/base_zh.yaml + + +datasets: [] +test_prefixes: [] +test_num: 0 +valid_num: 0 + +pre_align_cls: data_gen.singing.pre_align.SingingPreAlign +binarizer_cls: data_gen.singing.binarize.SingingBinarizer +pre_align_args: + use_tone: false # for ZH + forced_align: mfa + use_sox: true +hop_size: 128 # Hop size. +fft_size: 512 # FFT size. +win_size: 512 # FFT size. +max_frames: 8000 +fmin: 50 # Minimum freq in mel basis calculation. 
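Editor's note on the `base_config` chains used throughout these YAML files (for example, configs/singing/base.yaml above inherits from configs/tts/base.yaml and configs/tts/base_zh.yaml): parent configs are loaded first and the child file's own keys take precedence. The sketch below is a hypothetical `load_config` helper for illustration only; the repository's actual resolution happens in `utils/hparams.py` (`set_hparams`/`hparams`, imported elsewhere in this diff), and this stand-in is not that code.

```python
# Hedged sketch: resolve a `base_config` chain by loading parent YAMLs first and
# letting the child's own keys override them. `load_config` is a hypothetical
# helper, not the repository's utils/hparams.py loader.
import yaml

def load_config(path, _visited=None):
    _visited = set() if _visited is None else _visited
    assert path not in _visited, f"circular base_config chain at {path}"
    _visited.add(path)
    with open(path) as f:
        cfg = yaml.safe_load(f) or {}
    bases = cfg.pop('base_config', [])
    if isinstance(bases, str):      # base_config may be a single path or a list of paths
        bases = [bases]
    merged = {}
    for base_path in bases:         # e.g. configs/tts/base.yaml, then configs/tts/base_zh.yaml
        merged.update(load_config(base_path, _visited))
    merged.update(cfg)              # child keys win: hop_size 128 overrides 256, fmin 50 overrides 80
    return merged

# merged = load_config('configs/singing/base.yaml')
# merged['hop_size'] -> 128, merged['audio_sample_rate'] -> 22050 (inherited from configs/tts/base.yaml)
```

A production loader would also deep-merge nested mappings such as `pre_align_args` and `binarization_args` rather than replacing them wholesale; this sketch keeps the merge flat for brevity.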
+fmax: 11025 # Maximum frequency in mel basis calculation. +pitch_type: frame + +hidden_size: 256 +mel_loss: "ssim:0.5|l1:0.5" +lambda_f0: 0.0 +lambda_uv: 0.0 +lambda_energy: 0.0 +lambda_ph_dur: 0.0 +lambda_sent_dur: 0.0 +lambda_word_dur: 0.0 +predictor_grad: 0.0 +use_spk_embed: true +use_spk_id: false + +max_tokens: 20000 +max_updates: 400000 +num_spk: 100 +save_f0: true +use_gt_dur: true +use_gt_f0: true diff --git a/NeuralSeq/configs/singing/fs2.yaml b/NeuralSeq/configs/singing/fs2.yaml new file mode 100644 index 0000000000000000000000000000000000000000..83fe12394dd07f9baa948c96ea1dcfcfbcaa4bc3 --- /dev/null +++ b/NeuralSeq/configs/singing/fs2.yaml @@ -0,0 +1,3 @@ +base_config: + - configs/tts/fs2.yaml + - configs/singing/base.yaml diff --git a/NeuralSeq/configs/tts/base.yaml b/NeuralSeq/configs/tts/base.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f6d249ff47479a9693e6dd97ae0cb00aa436d98f --- /dev/null +++ b/NeuralSeq/configs/tts/base.yaml @@ -0,0 +1,95 @@ +# task +base_config: configs/config_base.yaml +task_cls: '' +############# +# dataset +############# +raw_data_dir: '' +processed_data_dir: '' +binary_data_dir: '' +dict_dir: '' +pre_align_cls: '' +binarizer_cls: data_gen.tts.base_binarizer.BaseBinarizer +pre_align_args: + use_tone: true # for ZH + forced_align: mfa + use_sox: false + txt_processor: en + allow_no_txt: false + denoise: false +binarization_args: + shuffle: false + with_txt: true + with_wav: false + with_align: true + with_spk_embed: true + with_f0: true + with_f0cwt: true + +loud_norm: false +endless_ds: true +reset_phone_dict: true + +test_num: 100 +valid_num: 100 +max_frames: 1550 +max_input_tokens: 1550 +audio_num_mel_bins: 80 +audio_sample_rate: 22050 +hop_size: 256 # For 22050Hz, 275 ~= 12.5 ms (0.0125 * sample_rate) +win_size: 1024 # For 22050Hz, 1100 ~= 50 ms (If None, win_size: fft_size) (0.05 * sample_rate) +fmin: 80 # Set this to 55 if your speaker is male! if female, 95 should help taking off noise. (To test depending on dataset. Pitch info: male~[65, 260], female~[100, 525]) +fmax: 7600 # To be increased/reduced depending on data. 
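A purely illustrative arithmetic check of the audio front-end values above: with `audio_sample_rate: 22050` and `hop_size: 256`, each mel frame covers about 11.6 ms (the inline comments quoting 275 samples ≈ 12.5 ms and 1100 samples ≈ 50 ms describe a different hop/window than the configured 256/1024), and `max_frames: 1550` therefore caps a training sample at roughly 18 s of audio. The numbers below are taken from the config; the snippet itself is not repository code.

```python
# Illustrative arithmetic only; values are from configs/tts/base.yaml above.
sample_rate = 22050
hop_size = 256                  # samples per mel frame
win_size = 1024                 # analysis window in samples
max_frames = 1550               # max mel frames per training sample

hop_ms = 1000 * hop_size / sample_rate               # ~11.61 ms per frame
win_ms = 1000 * win_size / sample_rate               # ~46.4 ms window
frames_per_sec = sample_rate / hop_size              # ~86.1 mel frames per second
max_clip_sec = max_frames * hop_size / sample_rate   # ~18.0 s

print(f"hop {hop_ms:.2f} ms, window {win_ms:.1f} ms, "
      f"{frames_per_sec:.1f} frames/s, max clip {max_clip_sec:.1f} s")
```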
+fft_size: 1024 # Extra window size is filled with 0 paddings to match this parameter +min_level_db: -100 +num_spk: 1 +mel_vmin: -6 +mel_vmax: 1.5 +ds_workers: 4 + +######### +# model +######### +dropout: 0.1 +enc_layers: 4 +dec_layers: 4 +hidden_size: 384 +num_heads: 2 +prenet_dropout: 0.5 +prenet_hidden_size: 256 +stop_token_weight: 5.0 +enc_ffn_kernel_size: 9 +dec_ffn_kernel_size: 9 +ffn_act: gelu +ffn_padding: 'SAME' + + +########### +# optimization +########### +lr: 2.0 +warmup_updates: 8000 +optimizer_adam_beta1: 0.9 +optimizer_adam_beta2: 0.98 +weight_decay: 0 +clip_grad_norm: 1 + + +########### +# train and eval +########### +max_tokens: 30000 +max_sentences: 100000 +max_eval_sentences: 1 +max_eval_tokens: 60000 +train_set_name: 'train' +valid_set_name: 'valid' +test_set_name: 'test' +vocoder: pwg +vocoder_ckpt: '' +profile_infer: false +out_wav_norm: false +save_gt: false +save_f0: false +gen_dir_name: '' +use_denoise: false diff --git a/NeuralSeq/configs/tts/base_zh.yaml b/NeuralSeq/configs/tts/base_zh.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a43a9ad399233b5520c9dc7476ff611d3fa6e9bb --- /dev/null +++ b/NeuralSeq/configs/tts/base_zh.yaml @@ -0,0 +1,3 @@ +pre_align_args: + txt_processor: zh_g2pM +binarizer_cls: data_gen.tts.binarizer_zh.ZhBinarizer \ No newline at end of file diff --git a/NeuralSeq/configs/tts/emotion/base_text2mel.yaml b/NeuralSeq/configs/tts/emotion/base_text2mel.yaml new file mode 100644 index 0000000000000000000000000000000000000000..cef7eca03c268f3619759b733c9370acb3669e98 --- /dev/null +++ b/NeuralSeq/configs/tts/emotion/base_text2mel.yaml @@ -0,0 +1,17 @@ +raw_data_dir: 'data/raw/ESD' +processed_data_dir: 'data/processed/emotion' +binary_data_dir: 'data/binary/emotion' +pre_align_cls: configs.tts.emotion.pre_align.EmoPreAlign +audio_sample_rate: 16000 +binarization_args: + shuffle: true +binarizer_cls: data_gen.tts.base_binarizer_emotion.EmotionBinarizer +use_spk_id: true +test_num: 200 +num_spk: 10 +pitch_type: frame +min_frames: 128 +num_test_samples: 30 +mel_loss: "ssim:0.5|l1:0.5" +vocoder_ckpt: '' +use_emotion: true \ No newline at end of file diff --git a/NeuralSeq/configs/tts/emotion/pre_align.py b/NeuralSeq/configs/tts/emotion/pre_align.py new file mode 100644 index 0000000000000000000000000000000000000000..3b625295a118845c01a3677004070714d11c162b --- /dev/null +++ b/NeuralSeq/configs/tts/emotion/pre_align.py @@ -0,0 +1,25 @@ +import os + +from data_gen.tts.base_preprocess import BasePreprocessor +import glob +import re + +class EmoPreAlign(BasePreprocessor): + + def meta_data(self): + spks = ['0012', '0011', '0013', '0014', '0015', '0016', '0017', '0018', '0019', '0020'] + pattern = re.compile('[\t\n ]+') + for spk in spks: + for line in open(f"{self.raw_data_dir}/{spk}/{spk}.txt", 'r'): # 打开文件 + line = re.sub(pattern, ' ', line) + if line == ' ': continue + split_ = line.split(' ') + txt = ' '.join(split_[1: -2]) + item_name = split_[0] + emotion = split_[-2] + wav_fn = f'{self.raw_data_dir}/{spk}/{emotion}/{item_name}.wav' + yield item_name, wav_fn, txt, spk, emotion + + +if __name__ == "__main__": + EmoPreAlign().process() diff --git a/NeuralSeq/configs/tts/fs2.yaml b/NeuralSeq/configs/tts/fs2.yaml new file mode 100644 index 0000000000000000000000000000000000000000..dd6aa37175542f503d85b2e9f9cbe86238b32700 --- /dev/null +++ b/NeuralSeq/configs/tts/fs2.yaml @@ -0,0 +1,80 @@ +base_config: configs/tts/base.yaml +task_cls: tasks.tts.fs2.FastSpeech2Task + +# model +hidden_size: 256 +dropout: 0.1 +encoder_type: fft # 
fft|tacotron|tacotron2|conformer +encoder_K: 8 # for tacotron encoder +decoder_type: fft # fft|rnn|conv|conformer +use_pos_embed: true + +# duration +predictor_hidden: -1 +predictor_kernel: 5 +predictor_layers: 2 +dur_predictor_kernel: 3 +dur_predictor_layers: 2 +predictor_dropout: 0.5 + +# pitch and energy +use_pitch_embed: true +pitch_type: ph # frame|ph|cwt +use_uv: true +cwt_hidden_size: 128 +cwt_layers: 2 +cwt_loss: l1 +cwt_add_f0_loss: false +cwt_std_scale: 0.8 + +pitch_ar: false +#pitch_embed_type: 0q +pitch_loss: 'l1' # l1|l2|ssim +pitch_norm: log +use_energy_embed: false + +# reference encoder and speaker embedding +use_spk_id: false +use_split_spk_id: false +use_spk_embed: false +use_var_enc: false +lambda_commit: 0.25 +ref_norm_layer: bn +pitch_enc_hidden_stride_kernel: + - 0,2,5 # conv_hidden_size, conv_stride, conv_kernel_size. conv_hidden_size=0: use hidden_size + - 0,2,5 + - 0,2,5 +dur_enc_hidden_stride_kernel: + - 0,2,3 # conv_hidden_size, conv_stride, conv_kernel_size. conv_hidden_size=0: use hidden_size + - 0,2,3 + - 0,1,3 + + +# mel +mel_loss: l1:0.5|ssim:0.5 # l1|l2|gdl|ssim or l1:0.5|ssim:0.5 + +# loss lambda +lambda_f0: 1.0 +lambda_uv: 1.0 +lambda_energy: 0.1 +lambda_ph_dur: 1.0 +lambda_sent_dur: 1.0 +lambda_word_dur: 1.0 +predictor_grad: 0.1 + +# train and eval +pretrain_fs_ckpt: '' +warmup_updates: 2000 +max_tokens: 32000 +max_sentences: 100000 +max_eval_sentences: 1 +max_updates: 120000 +num_valid_plots: 5 +num_test_samples: 0 +test_ids: [] +use_gt_dur: false +use_gt_f0: false + +# exp +dur_loss: mse # huber|mol +norm_type: gn \ No newline at end of file diff --git a/NeuralSeq/configs/tts/hifigan.yaml b/NeuralSeq/configs/tts/hifigan.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5ecd7390bcbd3c34a8303057c40a95bc841dd9db --- /dev/null +++ b/NeuralSeq/configs/tts/hifigan.yaml @@ -0,0 +1,21 @@ +base_config: configs/tts/pwg.yaml +task_cls: tasks.vocoder.hifigan.HifiGanTask +resblock: "1" +adam_b1: 0.8 +adam_b2: 0.99 +upsample_rates: [ 8,8,2,2 ] +upsample_kernel_sizes: [ 16,16,4,4 ] +upsample_initial_channel: 128 +resblock_kernel_sizes: [ 3,7,11 ] +resblock_dilation_sizes: [ [ 1,3,5 ], [ 1,3,5 ], [ 1,3,5 ] ] + +lambda_mel: 45.0 + +max_samples: 8192 +max_sentences: 16 + +generator_params: + lr: 0.0002 # Generator's learning rate. + aux_context_window: 0 # Context window size for auxiliary feature. +discriminator_optimizer_params: + lr: 0.0002 # Discriminator's learning rate. 
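One constraint worth making explicit for the vocoder configs (configs/tts/hifigan.yaml above and configs/tts/pwg.yaml later in this diff): the generator turns one mel frame into `hop_size` waveform samples, so the product of its upsample factors has to equal `hop_size`. The check below is an illustrative sanity test, not repository code.

```python
# Hedged sanity check (illustrative only): the product of a mel-to-waveform
# generator's upsample factors must equal hop_size (256 in configs/tts/base.yaml).
from math import prod

hop_size = 256
hifigan_upsample_rates = [8, 8, 2, 2]   # configs/tts/hifigan.yaml
pwg_upsample_scales = [4, 4, 4, 4]      # configs/tts/pwg.yaml, upsample_params

assert prod(hifigan_upsample_rates) == hop_size   # 8 * 8 * 2 * 2 = 256
assert prod(pwg_upsample_scales) == hop_size      # 4 * 4 * 4 * 4 = 256
```

The singing configs override `hop_size` to 128, so a vocoder checkpoint trained against these 256-sample-hop settings would not match that front end without adjusting the upsample factors accordingly.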
\ No newline at end of file diff --git a/NeuralSeq/configs/tts/libritts/__pycache__/pre_align.cpython-38.pyc b/NeuralSeq/configs/tts/libritts/__pycache__/pre_align.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..02f74151f4803603868e392d039f183f37972fbd Binary files /dev/null and b/NeuralSeq/configs/tts/libritts/__pycache__/pre_align.cpython-38.pyc differ diff --git a/NeuralSeq/configs/tts/libritts/base_text2mel.yaml b/NeuralSeq/configs/tts/libritts/base_text2mel.yaml new file mode 100644 index 0000000000000000000000000000000000000000..52eb977c2cd837d2df25f62951592328f6861714 --- /dev/null +++ b/NeuralSeq/configs/tts/libritts/base_text2mel.yaml @@ -0,0 +1,14 @@ +raw_data_dir: 'data/raw/LibriTTS' +processed_data_dir: 'data/processed/libritts' +binary_data_dir: 'data/binary/libritts' +pre_align_cls: configs.tts.libritts.pre_align.LibrittsPreAlign +binarization_args: + shuffle: true +use_spk_id: true +test_num: 200 +num_spk: 2320 +pitch_type: frame +min_frames: 128 +num_test_samples: 30 +mel_loss: "ssim:0.5|l1:0.5" +vocoder_ckpt: '' \ No newline at end of file diff --git a/NeuralSeq/configs/tts/libritts/fs2.yaml b/NeuralSeq/configs/tts/libritts/fs2.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a5250ba5677e94643ea0e72083a908fe4cd2bb87 --- /dev/null +++ b/NeuralSeq/configs/tts/libritts/fs2.yaml @@ -0,0 +1,3 @@ +base_config: + - configs/tts/fs2.yaml + - ./base_text2mel.yaml diff --git a/NeuralSeq/configs/tts/libritts/pre_align.py b/NeuralSeq/configs/tts/libritts/pre_align.py new file mode 100644 index 0000000000000000000000000000000000000000..8f04d01361430a4ad6b02421ac4e20d797f31dc8 --- /dev/null +++ b/NeuralSeq/configs/tts/libritts/pre_align.py @@ -0,0 +1,27 @@ +import os + +from data_gen.tts.base_preprocess import BasePreprocessor +import glob + + +class LibrittsPreAlign(BasePreprocessor): + def meta_data(self): + wav_fns = sorted(glob.glob(f'{self.raw_data_dir}/*/*/*.wav')) + for wav_fn in wav_fns: + item_name = os.path.basename(wav_fn)[:-4] + txt_fn = f'{wav_fn[:-4]}.normalized.txt' + with open(txt_fn, 'r') as f: + txt = f.readlines() + f.close() + spk = item_name.split("_")[0] + # Example: + # + # 'item_name': '103_1241_000000_000001' + # 'wav_fn': 'LibriTTS/train-clean-100/103/1241/103_1241_000000_000001.wav' + # 'txt': 'matthew Cuthbert is surprised' + # 'spk_name': '103' + yield {'item_name': item_name, 'wav_fn': wav_fn, 'txt': txt[0], 'spk_name': spk} + + +if __name__ == "__main__": + LibrittsPreAlign().process() diff --git a/NeuralSeq/configs/tts/libritts/pwg.yaml b/NeuralSeq/configs/tts/libritts/pwg.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a0fd70869274c3f8a7ac02a9ca5d7e1202dc895d --- /dev/null +++ b/NeuralSeq/configs/tts/libritts/pwg.yaml @@ -0,0 +1,8 @@ +base_config: egs/egs_bases/tts/vocoder/pwg.yaml +raw_data_dir: 'data/raw/LibriTTS' +processed_data_dir: 'data/processed/libritts' +binary_data_dir: 'data/binary/libritts_wav' +generator_params: + kernel_size: 5 +num_spk: 400 +max_samples: 20480 diff --git a/NeuralSeq/configs/tts/lj/base_mel2wav.yaml b/NeuralSeq/configs/tts/lj/base_mel2wav.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f99da439f539cee27f29d60bfbe2edcc06858bac --- /dev/null +++ b/NeuralSeq/configs/tts/lj/base_mel2wav.yaml @@ -0,0 +1,3 @@ +raw_data_dir: 'data/raw/LJSpeech-1.1' +processed_data_dir: 'data/processed/ljspeech' +binary_data_dir: 'data/binary/ljspeech_wav' diff --git a/NeuralSeq/configs/tts/lj/base_text2mel.yaml 
b/NeuralSeq/configs/tts/lj/base_text2mel.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0e0f3ce585226bb0f44533281033098d174ca7fb --- /dev/null +++ b/NeuralSeq/configs/tts/lj/base_text2mel.yaml @@ -0,0 +1,13 @@ +raw_data_dir: 'data/raw/LJSpeech-1.1' +processed_data_dir: 'data/processed/ljspeech' +binary_data_dir: 'data/binary/ljspeech' +pre_align_cls: data_gen.tts.lj.pre_align.LJPreAlign + +pitch_type: cwt +mel_loss: l1 +num_test_samples: 20 +test_ids: [ 68, 70, 74, 87, 110, 172, 190, 215, 231, 294, + 316, 324, 402, 422, 485, 500, 505, 508, 509, 519 ] +use_energy_embed: false +test_num: 523 +valid_num: 348 \ No newline at end of file diff --git a/NeuralSeq/configs/tts/lj/fs2.yaml b/NeuralSeq/configs/tts/lj/fs2.yaml new file mode 100644 index 0000000000000000000000000000000000000000..bc894b16c094b3d94771a0ad73566c28381c61e4 --- /dev/null +++ b/NeuralSeq/configs/tts/lj/fs2.yaml @@ -0,0 +1,3 @@ +base_config: + - configs/tts/fs2.yaml + - configs/tts/lj/base_text2mel.yaml \ No newline at end of file diff --git a/NeuralSeq/configs/tts/lj/hifigan.yaml b/NeuralSeq/configs/tts/lj/hifigan.yaml new file mode 100644 index 0000000000000000000000000000000000000000..68354a46f3c38b420283b12bc7d62761bfd38d5e --- /dev/null +++ b/NeuralSeq/configs/tts/lj/hifigan.yaml @@ -0,0 +1,3 @@ +base_config: + - configs/tts/hifigan.yaml + - configs/tts/lj/base_mel2wav.yaml \ No newline at end of file diff --git a/NeuralSeq/configs/tts/lj/pwg.yaml b/NeuralSeq/configs/tts/lj/pwg.yaml new file mode 100644 index 0000000000000000000000000000000000000000..72d539eaabd8035b8e38a0ded5d579efa8d42520 --- /dev/null +++ b/NeuralSeq/configs/tts/lj/pwg.yaml @@ -0,0 +1,3 @@ +base_config: + - configs/tts/pwg.yaml + - configs/tts/lj/base_mel2wav.yaml \ No newline at end of file diff --git a/NeuralSeq/configs/tts/pwg.yaml b/NeuralSeq/configs/tts/pwg.yaml new file mode 100644 index 0000000000000000000000000000000000000000..bb443d584b64223ac467417c12f62f6f403fa52f --- /dev/null +++ b/NeuralSeq/configs/tts/pwg.yaml @@ -0,0 +1,110 @@ +base_config: configs/tts/base.yaml +task_cls: tasks.vocoder.pwg.PwgTask + +binarization_args: + with_wav: true + with_spk_embed: false + with_align: false +test_input_dir: '' + +########### +# train and eval +########### +max_samples: 25600 +max_sentences: 5 +max_eval_sentences: 1 +max_updates: 1000000 +val_check_interval: 2000 + + +########################################################### +# FEATURE EXTRACTION SETTING # +########################################################### +sampling_rate: 22050 # Sampling rate. +fft_size: 1024 # FFT size. +hop_size: 256 # Hop size. +win_length: null # Window length. +# If set to null, it will be the same as fft_size. +window: "hann" # Window function. +num_mels: 80 # Number of mel basis. +fmin: 80 # Minimum freq in mel basis calculation. +fmax: 7600 # Maximum frequency in mel basis calculation. +format: "hdf5" # Feature file format. "npy" or "hdf5" is supported. + +########################################################### +# GENERATOR NETWORK ARCHITECTURE SETTING # +########################################################### +generator_params: + in_channels: 1 # Number of input channels. + out_channels: 1 # Number of output channels. + kernel_size: 3 # Kernel size of dilated convolution. + layers: 30 # Number of residual block layers. + stacks: 3 # Number of stacks i.e., dilation cycles. + residual_channels: 64 # Number of channels in residual conv. + gate_channels: 128 # Number of channels in gated conv. 
+ skip_channels: 64 # Number of channels in skip conv. + aux_channels: 80 # Number of channels for auxiliary feature conv. + # Must be the same as num_mels. + aux_context_window: 2 # Context window size for auxiliary feature. + # If set to 2, previous 2 and future 2 frames will be considered. + dropout: 0.0 # Dropout rate. 0.0 means no dropout applied. + use_weight_norm: true # Whether to use weight norm. + # If set to true, it will be applied to all of the conv layers. + upsample_net: "ConvInUpsampleNetwork" # Upsampling network architecture. + upsample_params: # Upsampling network parameters. + upsample_scales: [4, 4, 4, 4] # Upsampling scales. Prodcut of these must be the same as hop size. + use_pitch_embed: false + +########################################################### +# DISCRIMINATOR NETWORK ARCHITECTURE SETTING # +########################################################### +discriminator_params: + in_channels: 1 # Number of input channels. + out_channels: 1 # Number of output channels. + kernel_size: 3 # Number of output channels. + layers: 10 # Number of conv layers. + conv_channels: 64 # Number of chnn layers. + bias: true # Whether to use bias parameter in conv. + use_weight_norm: true # Whether to use weight norm. + # If set to true, it will be applied to all of the conv layers. + nonlinear_activation: "LeakyReLU" # Nonlinear function after each conv. + nonlinear_activation_params: # Nonlinear function parameters + negative_slope: 0.2 # Alpha in LeakyReLU. + +########################################################### +# STFT LOSS SETTING # +########################################################### +stft_loss_params: + fft_sizes: [1024, 2048, 512] # List of FFT size for STFT-based loss. + hop_sizes: [120, 240, 50] # List of hop size for STFT-based loss + win_lengths: [600, 1200, 240] # List of window length for STFT-based loss. + window: "hann_window" # Window function for STFT-based loss +use_mel_loss: false + +########################################################### +# ADVERSARIAL LOSS SETTING # +########################################################### +lambda_adv: 4.0 # Loss balancing coefficient. + +########################################################### +# OPTIMIZER & SCHEDULER SETTING # +########################################################### +generator_optimizer_params: + lr: 0.0001 # Generator's learning rate. + eps: 1.0e-6 # Generator's epsilon. + weight_decay: 0.0 # Generator's weight decay coefficient. +generator_scheduler_params: + step_size: 200000 # Generator's scheduler step size. + gamma: 0.5 # Generator's scheduler gamma. + # At each step size, lr will be multiplied by this parameter. +generator_grad_norm: 10 # Generator's gradient norm. +discriminator_optimizer_params: + lr: 0.00005 # Discriminator's learning rate. + eps: 1.0e-6 # Discriminator's epsilon. + weight_decay: 0.0 # Discriminator's weight decay coefficient. +discriminator_scheduler_params: + step_size: 200000 # Discriminator's scheduler step size. + gamma: 0.5 # Discriminator's scheduler gamma. + # At each step size, lr will be multiplied by this parameter. +discriminator_grad_norm: 1 # Discriminator's gradient norm. +disc_start_steps: 40000 # Number of steps to start to train discriminator. 
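The `stft_loss_params` block above configures a multi-resolution STFT loss for the Parallel WaveGAN task. The snippet below is a hedged sketch of the standard form of that objective (spectral convergence plus log-magnitude L1, averaged over the three resolutions); it is illustrative and not the implementation in `tasks.vocoder.pwg`.

```python
# Hedged sketch of a multi-resolution STFT loss driven by stft_loss_params
# (fft_sizes, hop_sizes, win_lengths, hann window). Illustrative only.
import torch
import torch.nn.functional as F

def stft_mag(x, n_fft, hop, win):
    window = torch.hann_window(win, device=x.device)
    spec = torch.stft(x, n_fft, hop, win, window, return_complex=True)
    return spec.abs().clamp(min=1e-7)      # [B, n_fft // 2 + 1, frames]

def mr_stft_loss(y_hat, y,
                 fft_sizes=(1024, 2048, 512),
                 hop_sizes=(120, 240, 50),
                 win_lengths=(600, 1200, 240)):
    sc_loss, mag_loss = 0.0, 0.0
    for n_fft, hop, win in zip(fft_sizes, hop_sizes, win_lengths):
        mag_hat = stft_mag(y_hat, n_fft, hop, win)
        mag = stft_mag(y, n_fft, hop, win)
        sc_loss += torch.norm(mag - mag_hat, p='fro') / torch.norm(mag, p='fro')  # spectral convergence
        mag_loss += F.l1_loss(torch.log(mag_hat), torch.log(mag))                 # log STFT magnitude L1
    n = len(fft_sizes)
    return sc_loss / n, mag_loss / n

# y, y_hat: [B, T] waveforms; the adversarial term (weighted by lambda_adv) is
# typically added only after disc_start_steps, as configured above.
```

Using several FFT sizes at once keeps the generator from overfitting a single time-frequency resolution, which is why three resolutions (512/1024/2048) are configured here.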
diff --git a/NeuralSeq/data_gen/tts/__pycache__/base_binarizer.cpython-38.pyc b/NeuralSeq/data_gen/tts/__pycache__/base_binarizer.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..727180589707ff1f0d10edf1894f9c0a88ba6250 Binary files /dev/null and b/NeuralSeq/data_gen/tts/__pycache__/base_binarizer.cpython-38.pyc differ diff --git a/NeuralSeq/data_gen/tts/__pycache__/base_binarizer_emotion.cpython-38.pyc b/NeuralSeq/data_gen/tts/__pycache__/base_binarizer_emotion.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..679885e7fedfb14d99ae273024520656fc5a86d2 Binary files /dev/null and b/NeuralSeq/data_gen/tts/__pycache__/base_binarizer_emotion.cpython-38.pyc differ diff --git a/NeuralSeq/data_gen/tts/__pycache__/base_preprocess.cpython-38.pyc b/NeuralSeq/data_gen/tts/__pycache__/base_preprocess.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..64732b31e2a5e419e38d45d8f1105da752e8c818 Binary files /dev/null and b/NeuralSeq/data_gen/tts/__pycache__/base_preprocess.cpython-38.pyc differ diff --git a/NeuralSeq/data_gen/tts/__pycache__/data_gen_utils.cpython-37.pyc b/NeuralSeq/data_gen/tts/__pycache__/data_gen_utils.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1332fa19347d585077de8405363b37f76152e910 Binary files /dev/null and b/NeuralSeq/data_gen/tts/__pycache__/data_gen_utils.cpython-37.pyc differ diff --git a/NeuralSeq/data_gen/tts/__pycache__/data_gen_utils.cpython-38.pyc b/NeuralSeq/data_gen/tts/__pycache__/data_gen_utils.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..436672bdaa39e014f6e9d4cb0bacd2cb4e4d545c Binary files /dev/null and b/NeuralSeq/data_gen/tts/__pycache__/data_gen_utils.cpython-38.pyc differ diff --git a/NeuralSeq/data_gen/tts/base_binarizer.py b/NeuralSeq/data_gen/tts/base_binarizer.py new file mode 100644 index 0000000000000000000000000000000000000000..b30a20c1cdc3403214ff527d68a50806befafeb9 --- /dev/null +++ b/NeuralSeq/data_gen/tts/base_binarizer.py @@ -0,0 +1,224 @@ +import os +os.environ["OMP_NUM_THREADS"] = "1" + +from utils.multiprocess_utils import chunked_multiprocess_run +import random +import traceback +import json +from resemblyzer import VoiceEncoder +from tqdm import tqdm +from data_gen.tts.data_gen_utils import get_mel2ph, get_pitch, build_phone_encoder +from utils.hparams import set_hparams, hparams +import numpy as np +from utils.indexed_datasets import IndexedDatasetBuilder +from vocoders.base_vocoder import VOCODERS +import pandas as pd + + +class BinarizationError(Exception): + pass + + +class BaseBinarizer: + def __init__(self, processed_data_dir=None): + if processed_data_dir is None: + processed_data_dir = hparams['processed_data_dir'] + self.processed_data_dirs = processed_data_dir.split(",") + self.binarization_args = hparams['binarization_args'] + self.pre_align_args = hparams['pre_align_args'] + self.forced_align = self.pre_align_args['forced_align'] + tg_dir = None + if self.forced_align == 'mfa': + tg_dir = 'mfa_outputs' + if self.forced_align == 'kaldi': + tg_dir = 'kaldi_outputs' + self.item2txt = {} + self.item2ph = {} + self.item2wavfn = {} + self.item2tgfn = {} + self.item2spk = {} + for ds_id, processed_data_dir in enumerate(self.processed_data_dirs): + self.meta_df = pd.read_csv(f"{processed_data_dir}/metadata_phone.csv", dtype=str) + for r_idx, r in self.meta_df.iterrows(): + item_name = raw_item_name = r['item_name'] + if len(self.processed_data_dirs) > 1: + 
item_name = f'ds{ds_id}_{item_name}' + self.item2txt[item_name] = r['txt'] + self.item2ph[item_name] = r['ph'] + self.item2wavfn[item_name] = os.path.join(hparams['raw_data_dir'], 'wavs', os.path.basename(r['wav_fn']).split('_')[1]) + self.item2spk[item_name] = r.get('spk', 'SPK1') + if len(self.processed_data_dirs) > 1: + self.item2spk[item_name] = f"ds{ds_id}_{self.item2spk[item_name]}" + if tg_dir is not None: + self.item2tgfn[item_name] = f"{processed_data_dir}/{tg_dir}/{raw_item_name}.TextGrid" + self.item_names = sorted(list(self.item2txt.keys())) + if self.binarization_args['shuffle']: + random.seed(1234) + random.shuffle(self.item_names) + + @property + def train_item_names(self): + return self.item_names[hparams['test_num']+hparams['valid_num']:] + + @property + def valid_item_names(self): + return self.item_names[0: hparams['test_num']+hparams['valid_num']] # + + @property + def test_item_names(self): + return self.item_names[0: hparams['test_num']] # Audios for MOS testing are in 'test_ids' + + def build_spk_map(self): + spk_map = set() + for item_name in self.item_names: + spk_name = self.item2spk[item_name] + spk_map.add(spk_name) + spk_map = {x: i for i, x in enumerate(sorted(list(spk_map)))} + assert len(spk_map) == 0 or len(spk_map) <= hparams['num_spk'], len(spk_map) + return spk_map + + def item_name2spk_id(self, item_name): + return self.spk_map[self.item2spk[item_name]] + + def _phone_encoder(self): + ph_set_fn = f"{hparams['binary_data_dir']}/phone_set.json" + ph_set = [] + if hparams['reset_phone_dict'] or not os.path.exists(ph_set_fn): + for processed_data_dir in self.processed_data_dirs: + ph_set += [x.split(' ')[0] for x in open(f'{processed_data_dir}/dict.txt').readlines()] + ph_set = sorted(set(ph_set)) + json.dump(ph_set, open(ph_set_fn, 'w')) + else: + ph_set = json.load(open(ph_set_fn, 'r')) + print("| phone set: ", ph_set) + return build_phone_encoder(hparams['binary_data_dir']) + + def meta_data(self, prefix): + if prefix == 'valid': + item_names = self.valid_item_names + elif prefix == 'test': + item_names = self.test_item_names + else: + item_names = self.train_item_names + for item_name in item_names: + ph = self.item2ph[item_name] + txt = self.item2txt[item_name] + tg_fn = self.item2tgfn.get(item_name) + wav_fn = self.item2wavfn[item_name] + spk_id = self.item_name2spk_id(item_name) + yield item_name, ph, txt, tg_fn, wav_fn, spk_id + + def process(self): + os.makedirs(hparams['binary_data_dir'], exist_ok=True) + self.spk_map = self.build_spk_map() + print("| spk_map: ", self.spk_map) + spk_map_fn = f"{hparams['binary_data_dir']}/spk_map.json" + json.dump(self.spk_map, open(spk_map_fn, 'w')) + + self.phone_encoder = self._phone_encoder() + self.process_data('valid') + self.process_data('test') + self.process_data('train') + + def process_data(self, prefix): + data_dir = hparams['binary_data_dir'] + args = [] + builder = IndexedDatasetBuilder(f'{data_dir}/{prefix}') + lengths = [] + f0s = [] + total_sec = 0 + if self.binarization_args['with_spk_embed']: + voice_encoder = VoiceEncoder().cuda() + + meta_data = list(self.meta_data(prefix)) + for m in meta_data: + args.append(list(m) + [self.phone_encoder, self.binarization_args]) + num_workers = int(os.getenv('N_PROC', os.cpu_count() // 3)) + for f_id, (_, item) in enumerate( + zip(tqdm(meta_data), chunked_multiprocess_run(self.process_item, args, num_workers=num_workers))): + if item is None: + continue + item['spk_embed'] = voice_encoder.embed_utterance(item['wav']) \ + if 
self.binarization_args['with_spk_embed'] else None + if not self.binarization_args['with_wav'] and 'wav' in item: + print("del wav") + del item['wav'] + builder.add_item(item) + lengths.append(item['len']) + total_sec += item['sec'] + if item.get('f0') is not None: + f0s.append(item['f0']) + builder.finalize() + np.save(f'{data_dir}/{prefix}_lengths.npy', lengths) + if len(f0s) > 0: + f0s = np.concatenate(f0s, 0) + f0s = f0s[f0s != 0] + np.save(f'{data_dir}/{prefix}_f0s_mean_std.npy', [np.mean(f0s).item(), np.std(f0s).item()]) + print(f"| {prefix} total duration: {total_sec:.3f}s") + + @classmethod + def process_item(cls, item_name, ph, txt, tg_fn, wav_fn, spk_id, encoder, binarization_args): + if hparams['vocoder'] in VOCODERS: + wav, mel = VOCODERS[hparams['vocoder']].wav2spec(wav_fn) + else: + wav, mel = VOCODERS[hparams['vocoder'].split('.')[-1]].wav2spec(wav_fn) + res = { + 'item_name': item_name, 'txt': txt, 'ph': ph, 'mel': mel, 'wav': wav, 'wav_fn': wav_fn, + 'sec': len(wav) / hparams['audio_sample_rate'], 'len': mel.shape[0], 'spk_id': spk_id + } + try: + if binarization_args['with_f0']: + cls.get_pitch(wav, mel, res) + if binarization_args['with_f0cwt']: + cls.get_f0cwt(res['f0'], res) + if binarization_args['with_txt']: + try: + phone_encoded = res['phone'] = encoder.encode(ph) + except: + traceback.print_exc() + raise BinarizationError(f"Empty phoneme") + if binarization_args['with_align']: + cls.get_align(tg_fn, ph, mel, phone_encoded, res) + except BinarizationError as e: + print(f"| Skip item ({e}). item_name: {item_name}, wav_fn: {wav_fn}") + return None + return res + + @staticmethod + def get_align(tg_fn, ph, mel, phone_encoded, res): + if tg_fn is not None and os.path.exists(tg_fn): + mel2ph, dur = get_mel2ph(tg_fn, ph, mel, hparams) + else: + raise BinarizationError(f"Align not found") + if mel2ph.max() - 1 >= len(phone_encoded): + raise BinarizationError( + f"Align does not match: mel2ph.max() - 1: {mel2ph.max() - 1}, len(phone_encoded): {len(phone_encoded)}") + res['mel2ph'] = mel2ph + res['dur'] = dur + + @staticmethod + def get_pitch(wav, mel, res): + f0, pitch_coarse = get_pitch(wav, mel, hparams) + if sum(f0) == 0: + raise BinarizationError("Empty f0") + res['f0'] = f0 + res['pitch'] = pitch_coarse + + @staticmethod + def get_f0cwt(f0, res): + from utils.cwt import get_cont_lf0, get_lf0_cwt + uv, cont_lf0_lpf = get_cont_lf0(f0) + logf0s_mean_org, logf0s_std_org = np.mean(cont_lf0_lpf), np.std(cont_lf0_lpf) + cont_lf0_lpf_norm = (cont_lf0_lpf - logf0s_mean_org) / logf0s_std_org + Wavelet_lf0, scales = get_lf0_cwt(cont_lf0_lpf_norm) + if np.any(np.isnan(Wavelet_lf0)): + raise BinarizationError("NaN CWT") + res['cwt_spec'] = Wavelet_lf0 + res['cwt_scales'] = scales + res['f0_mean'] = logf0s_mean_org + res['f0_std'] = logf0s_std_org + + +if __name__ == "__main__": + set_hparams() + BaseBinarizer().process() diff --git a/NeuralSeq/data_gen/tts/base_binarizer_emotion.py b/NeuralSeq/data_gen/tts/base_binarizer_emotion.py new file mode 100644 index 0000000000000000000000000000000000000000..2193a39417781a4673c11210cecb146401be2d09 --- /dev/null +++ b/NeuralSeq/data_gen/tts/base_binarizer_emotion.py @@ -0,0 +1,352 @@ +import os + +os.environ["OMP_NUM_THREADS"] = "1" +import torch +from collections import Counter +from utils.text_encoder import TokenTextEncoder +from data_gen.tts.emotion import inference as EmotionEncoder +from data_gen.tts.emotion.inference import embed_utterance as Embed_utterance +from data_gen.tts.emotion.inference import preprocess_wav +from 
utils.multiprocess_utils import chunked_multiprocess_run +import random +import traceback +import json +from resemblyzer import VoiceEncoder +from tqdm import tqdm +from data_gen.tts.data_gen_utils import get_mel2ph, get_pitch, build_phone_encoder, is_sil_phoneme +from utils.hparams import hparams, set_hparams +import numpy as np +from utils.indexed_datasets import IndexedDatasetBuilder +from vocoders.base_vocoder import get_vocoder_cls +import pandas as pd + + +class BinarizationError(Exception): + pass + + +class EmotionBinarizer: + def __init__(self, processed_data_dir=None): + if processed_data_dir is None: + processed_data_dir = hparams['processed_data_dir'] + self.processed_data_dirs = processed_data_dir.split(",") + self.binarization_args = hparams['binarization_args'] + self.pre_align_args = hparams['pre_align_args'] + self.item2txt = {} + self.item2ph = {} + self.item2wavfn = {} + self.item2tgfn = {} + self.item2spk = {} + self.item2emo = {} + + def load_meta_data(self): + for ds_id, processed_data_dir in enumerate(self.processed_data_dirs): + self.meta_df = pd.read_csv(f"{processed_data_dir}/metadata_phone.csv", dtype=str) + for r_idx, r in tqdm(self.meta_df.iterrows(), desc='Loading meta data.'): + item_name = raw_item_name = r['item_name'] + if len(self.processed_data_dirs) > 1: + item_name = f'ds{ds_id}_{item_name}' + self.item2txt[item_name] = r['txt'] + self.item2ph[item_name] = r['ph'] + self.item2wavfn[item_name] = r['wav_fn'] + self.item2spk[item_name] = r.get('spk_name', 'SPK1') \ + if self.binarization_args['with_spk_id'] else 'SPK1' + if len(self.processed_data_dirs) > 1: + self.item2spk[item_name] = f"ds{ds_id}_{self.item2spk[item_name]}" + self.item2tgfn[item_name] = f"{processed_data_dir}/mfa_outputs/{raw_item_name}.TextGrid" + self.item2emo[item_name] = r.get('others', '"Neutral"') + self.item_names = sorted(list(self.item2txt.keys())) + if self.binarization_args['shuffle']: + random.seed(1234) + random.shuffle(self.item_names) + + @property + def train_item_names(self): + return self.item_names[hparams['test_num']:] + + @property + def valid_item_names(self): + return self.item_names[:hparams['test_num']] + + @property + def test_item_names(self): + return self.valid_item_names + + def build_spk_map(self): + spk_map = set() + for item_name in self.item_names: + spk_name = self.item2spk[item_name] + spk_map.add(spk_name) + spk_map = {x: i for i, x in enumerate(sorted(list(spk_map)))} + print("| #Spk: ", len(spk_map)) + assert len(spk_map) == 0 or len(spk_map) <= hparams['num_spk'], len(spk_map) + return spk_map + + def build_emo_map(self): + emo_map = set() + for item_name in self.item_names: + emo_name = self.item2emo[item_name] + emo_map.add(emo_name) + emo_map = {x: i for i, x in enumerate(sorted(list(emo_map)))} + print("| #Emo: ", len(emo_map)) + return emo_map + + def item_name2spk_id(self, item_name): + return self.spk_map[self.item2spk[item_name]] + + def item_name2emo_id(self, item_name): + return self.emo_map[self.item2emo[item_name]] + + def _phone_encoder(self): + ph_set_fn = f"{hparams['binary_data_dir']}/phone_set.json" + ph_set = [] + if self.binarization_args['reset_phone_dict'] or not os.path.exists(ph_set_fn): + for ph_sent in self.item2ph.values(): + ph_set += ph_sent.split(' ') + ph_set = sorted(set(ph_set)) + json.dump(ph_set, open(ph_set_fn, 'w')) + print("| Build phone set: ", ph_set) + else: + ph_set = json.load(open(ph_set_fn, 'r')) + print("| Load phone set: ", ph_set) + return build_phone_encoder(hparams['binary_data_dir']) + + def 
_word_encoder(self): + fn = f"{hparams['binary_data_dir']}/word_set.json" + word_set = [] + if self.binarization_args['reset_word_dict']: + for word_sent in self.item2txt.values(): + word_set += [x for x in word_sent.split(' ') if x != ''] + word_set = Counter(word_set) + total_words = sum(word_set.values()) + word_set = word_set.most_common(hparams['word_size']) + num_unk_words = total_words - sum([x[1] for x in word_set]) + word_set = [x[0] for x in word_set] + json.dump(word_set, open(fn, 'w')) + print(f"| Build word set. Size: {len(word_set)}, #total words: {total_words}," + f" #unk_words: {num_unk_words}, word_set[:10]:, {word_set[:10]}.") + else: + word_set = json.load(open(fn, 'r')) + print("| Load word set. Size: ", len(word_set), word_set[:10]) + return TokenTextEncoder(None, vocab_list=word_set, replace_oov='') + + def meta_data(self, prefix): + if prefix == 'valid': + item_names = self.valid_item_names + elif prefix == 'test': + item_names = self.test_item_names + else: + item_names = self.train_item_names + for item_name in item_names: + ph = self.item2ph[item_name] + txt = self.item2txt[item_name] + tg_fn = self.item2tgfn.get(item_name) + wav_fn = self.item2wavfn[item_name] + spk_id = self.item_name2spk_id(item_name) + emotion = self.item_name2emo_id(item_name) + yield item_name, ph, txt, tg_fn, wav_fn, spk_id, emotion + + def process(self): + self.load_meta_data() + os.makedirs(hparams['binary_data_dir'], exist_ok=True) + self.spk_map = self.build_spk_map() + print("| spk_map: ", self.spk_map) + spk_map_fn = f"{hparams['binary_data_dir']}/spk_map.json" + json.dump(self.spk_map, open(spk_map_fn, 'w')) + + self.emo_map = self.build_emo_map() + print("| emo_map: ", self.emo_map) + emo_map_fn = f"{hparams['binary_data_dir']}/emo_map.json" + json.dump(self.emo_map, open(emo_map_fn, 'w')) + + self.phone_encoder = self._phone_encoder() + self.word_encoder = None + EmotionEncoder.load_model(hparams['emotion_encoder_path']) + + if self.binarization_args['with_word']: + self.word_encoder = self._word_encoder() + self.process_data('valid') + self.process_data('test') + self.process_data('train') + + def process_data(self, prefix): + data_dir = hparams['binary_data_dir'] + args = [] + builder = IndexedDatasetBuilder(f'{data_dir}/{prefix}') + ph_lengths = [] + mel_lengths = [] + f0s = [] + total_sec = 0 + if self.binarization_args['with_spk_embed']: + voice_encoder = VoiceEncoder().cuda() + + meta_data = list(self.meta_data(prefix)) + for m in meta_data: + args.append(list(m) + [(self.phone_encoder, self.word_encoder), self.binarization_args]) + num_workers = self.num_workers + for f_id, (_, item) in enumerate( + zip(tqdm(meta_data), chunked_multiprocess_run(self.process_item, args, num_workers=num_workers))): + if item is None: + continue + item['spk_embed'] = voice_encoder.embed_utterance(item['wav']) \ + if self.binarization_args['with_spk_embed'] else None + processed_wav = preprocess_wav(item['wav_fn']) + item['emo_embed'] = Embed_utterance(processed_wav) + if not self.binarization_args['with_wav'] and 'wav' in item: + del item['wav'] + builder.add_item(item) + mel_lengths.append(item['len']) + if 'ph_len' in item: + ph_lengths.append(item['ph_len']) + total_sec += item['sec'] + if item.get('f0') is not None: + f0s.append(item['f0']) + builder.finalize() + np.save(f'{data_dir}/{prefix}_lengths.npy', mel_lengths) + if len(ph_lengths) > 0: + np.save(f'{data_dir}/{prefix}_ph_lengths.npy', ph_lengths) + if len(f0s) > 0: + f0s = np.concatenate(f0s, 0) + f0s = f0s[f0s != 0] + 
np.save(f'{data_dir}/{prefix}_f0s_mean_std.npy', [np.mean(f0s).item(), np.std(f0s).item()]) + print(f"| {prefix} total duration: {total_sec:.3f}s") + + @classmethod + def process_item(cls, item_name, ph, txt, tg_fn, wav_fn, spk_id, emotion, encoder, binarization_args): + res = {'item_name': item_name, 'txt': txt, 'ph': ph, 'wav_fn': wav_fn, 'spk_id': spk_id, 'emotion': emotion} + if binarization_args['with_linear']: + wav, mel, linear_stft = get_vocoder_cls(hparams).wav2spec(wav_fn) # , return_linear=True + res['linear'] = linear_stft + else: + wav, mel = get_vocoder_cls(hparams).wav2spec(wav_fn) + wav = wav.astype(np.float16) + res.update({'mel': mel, 'wav': wav, + 'sec': len(wav) / hparams['audio_sample_rate'], 'len': mel.shape[0]}) + try: + if binarization_args['with_f0']: + cls.get_pitch(res) + if binarization_args['with_f0cwt']: + cls.get_f0cwt(res) + if binarization_args['with_txt']: + ph_encoder, word_encoder = encoder + try: + res['phone'] = ph_encoder.encode(ph) + res['ph_len'] = len(res['phone']) + except: + traceback.print_exc() + raise BinarizationError(f"Empty phoneme") + if binarization_args['with_align']: + cls.get_align(tg_fn, res) + if binarization_args['trim_eos_bos']: + bos_dur = res['dur'][0] + eos_dur = res['dur'][-1] + res['mel'] = mel[bos_dur:-eos_dur] + res['f0'] = res['f0'][bos_dur:-eos_dur] + res['pitch'] = res['pitch'][bos_dur:-eos_dur] + res['mel2ph'] = res['mel2ph'][bos_dur:-eos_dur] + res['wav'] = wav[bos_dur * hparams['hop_size']:-eos_dur * hparams['hop_size']] + res['dur'] = res['dur'][1:-1] + res['len'] = res['mel'].shape[0] + if binarization_args['with_word']: + cls.get_word(res, word_encoder) + except BinarizationError as e: + print(f"| Skip item ({e}). item_name: {item_name}, wav_fn: {wav_fn}") + return None + except Exception as e: + traceback.print_exc() + print(f"| Skip item. item_name: {item_name}, wav_fn: {wav_fn}") + return None + return res + + @staticmethod + def get_align(tg_fn, res): + ph = res['ph'] + mel = res['mel'] + phone_encoded = res['phone'] + if tg_fn is not None and os.path.exists(tg_fn): + mel2ph, dur = get_mel2ph(tg_fn, ph, mel, hparams) + else: + raise BinarizationError(f"Align not found") + if mel2ph.max() - 1 >= len(phone_encoded): + raise BinarizationError( + f"Align does not match: mel2ph.max() - 1: {mel2ph.max() - 1}, len(phone_encoded): {len(phone_encoded)}") + res['mel2ph'] = mel2ph + res['dur'] = dur + + @staticmethod + def get_pitch(res): + wav, mel = res['wav'], res['mel'] + f0, pitch_coarse = get_pitch(wav, mel, hparams) + if sum(f0) == 0: + raise BinarizationError("Empty f0") + res['f0'] = f0 + res['pitch'] = pitch_coarse + + @staticmethod + def get_f0cwt(res): + from utils.cwt import get_cont_lf0, get_lf0_cwt + f0 = res['f0'] + uv, cont_lf0_lpf = get_cont_lf0(f0) + logf0s_mean_org, logf0s_std_org = np.mean(cont_lf0_lpf), np.std(cont_lf0_lpf) + cont_lf0_lpf_norm = (cont_lf0_lpf - logf0s_mean_org) / logf0s_std_org + Wavelet_lf0, scales = get_lf0_cwt(cont_lf0_lpf_norm) + if np.any(np.isnan(Wavelet_lf0)): + raise BinarizationError("NaN CWT") + res['cwt_spec'] = Wavelet_lf0 + res['cwt_scales'] = scales + res['f0_mean'] = logf0s_mean_org + res['f0_std'] = logf0s_std_org + + @staticmethod + def get_word(res, word_encoder): + ph_split = res['ph'].split(" ") + # ph side mapping to word + ph_words = [] # ['', 'N_AW1_', ',', 'AE1_Z_|', 'AO1_L_|', 'B_UH1_K_S_|', 'N_AA1_T_|', ....] + ph2word = np.zeros([len(ph_split)], dtype=int) + last_ph_idx_for_word = [] # [2, 11, ...] 
+ for i, ph in enumerate(ph_split): + if ph == '|': + last_ph_idx_for_word.append(i) + elif not ph[0].isalnum(): + if ph not in ['']: + last_ph_idx_for_word.append(i - 1) + last_ph_idx_for_word.append(i) + start_ph_idx_for_word = [0] + [i + 1 for i in last_ph_idx_for_word[:-1]] + for i, (s_w, e_w) in enumerate(zip(start_ph_idx_for_word, last_ph_idx_for_word)): + ph_words.append(ph_split[s_w:e_w + 1]) + ph2word[s_w:e_w + 1] = i + ph2word = ph2word.tolist() + ph_words = ["_".join(w) for w in ph_words] + + # mel side mapping to word + mel2word = [] + dur_word = [0 for _ in range(len(ph_words))] + for i, m2p in enumerate(res['mel2ph']): + word_idx = ph2word[m2p - 1] + mel2word.append(ph2word[m2p - 1]) + dur_word[word_idx] += 1 + ph2word = [x + 1 for x in ph2word] # 0预留给padding + mel2word = [x + 1 for x in mel2word] # 0预留给padding + res['ph_words'] = ph_words # [T_word] + res['ph2word'] = ph2word # [T_ph] + res['mel2word'] = mel2word # [T_mel] + res['dur_word'] = dur_word # [T_word] + words = [x for x in res['txt'].split(" ") if x != ''] + while len(words) > 0 and is_sil_phoneme(words[0]): + words = words[1:] + while len(words) > 0 and is_sil_phoneme(words[-1]): + words = words[:-1] + words = [''] + words + [''] + word_tokens = word_encoder.encode(" ".join(words)) + res['words'] = words + res['word_tokens'] = word_tokens + assert len(words) == len(ph_words), [words, ph_words] + + @property + def num_workers(self): + return int(os.getenv('N_PROC', hparams.get('N_PROC', os.cpu_count()))) + + +if __name__ == "__main__": + set_hparams() + EmotionBinarizer().process() diff --git a/NeuralSeq/data_gen/tts/base_preprocess.py b/NeuralSeq/data_gen/tts/base_preprocess.py new file mode 100644 index 0000000000000000000000000000000000000000..56c69875ad74ad45e774efa4029c5e3d858166ce --- /dev/null +++ b/NeuralSeq/data_gen/tts/base_preprocess.py @@ -0,0 +1,254 @@ +import json +import os +import random +import re +import traceback +from collections import Counter +from functools import partial +import pandas as pd +import librosa +from tqdm import tqdm +from data_gen.tts.txt_processors.base_text_processor import get_txt_processor_cls +from data_gen.tts.wav_processors.base_processor import get_wav_processor_cls +from utils.hparams import hparams +from utils.multiprocess_utils import multiprocess_run_tqdm +from utils.os_utils import link_file, move_file, remove_file +from data_gen.tts.data_gen_utils import is_sil_phoneme, build_token_encoder + + +class BasePreprocessor: + def __init__(self): + self.preprocess_args = hparams['preprocess_args'] + txt_processor = self.preprocess_args['txt_processor'] + self.txt_processor = get_txt_processor_cls(txt_processor) + self.raw_data_dir = hparams['raw_data_dir'] + self.processed_dir = hparams['processed_data_dir'] + self.spk_map_fn = f"{self.processed_dir}/spk_map.json" + + def meta_data(self): + """ + :return: {'item_name': Str, 'wav_fn': Str, 'txt': Str, 'spk_name': Str, 'txt_loader': None or Func} + """ + raise NotImplementedError + + def process(self): + processed_dir = self.processed_dir + wav_processed_tmp_dir = f'{processed_dir}/processed_tmp' + remove_file(wav_processed_tmp_dir) + os.makedirs(wav_processed_tmp_dir, exist_ok=True) + wav_processed_dir = f'{processed_dir}/{self.wav_processed_dirname}' + remove_file(wav_processed_dir) + os.makedirs(wav_processed_dir, exist_ok=True) + + meta_data = list(tqdm(self.meta_data(), desc='Load meta data')) + item_names = [d['item_name'] for d in meta_data] + assert len(item_names) == len(set(item_names)), 'Key `item_name` should 
be Unique.' + + # preprocess data + phone_list = [] + word_list = [] + spk_names = set() + process_item = partial(self.preprocess_first_pass, + txt_processor=self.txt_processor, + wav_processed_dir=wav_processed_dir, + wav_processed_tmp=wav_processed_tmp_dir, + preprocess_args=self.preprocess_args) + items = [] + args = [{ + 'item_name': item_raw['item_name'], + 'txt_raw': item_raw['txt'], + 'wav_fn': item_raw['wav_fn'], + 'txt_loader': item_raw.get('txt_loader'), + 'others': item_raw.get('others', None) + } for item_raw in meta_data] + for item_, (item_id, item) in zip(meta_data, multiprocess_run_tqdm(process_item, args, desc='Preprocess')): + if item is not None: + item_.update(item) + item = item_ + if 'txt_loader' in item: + del item['txt_loader'] + item['id'] = item_id + item['spk_name'] = item.get('spk_name', '') + item['others'] = item.get('others', None) + phone_list += item['ph'].split(" ") + word_list += item['word'].split(" ") + spk_names.add(item['spk_name']) + items.append(item) + + # add encoded tokens + ph_encoder, word_encoder = self._phone_encoder(phone_list), self._word_encoder(word_list) + spk_map = self.build_spk_map(spk_names) + args = [{ + 'ph': item['ph'], 'word': item['word'], 'spk_name': item['spk_name'], + 'word_encoder': word_encoder, 'ph_encoder': ph_encoder, 'spk_map': spk_map + } for item in items] + for idx, item_new_kv in multiprocess_run_tqdm(self.preprocess_second_pass, args, desc='Add encoded tokens'): + items[idx].update(item_new_kv) + + # build mfa data + if self.preprocess_args['use_mfa']: + mfa_dict = set() + mfa_input_dir = f'{processed_dir}/mfa_inputs' + remove_file(mfa_input_dir) + # group MFA inputs for better parallelism + mfa_groups = [i // self.preprocess_args['nsample_per_mfa_group'] for i in range(len(items))] + if self.preprocess_args['mfa_group_shuffle']: + random.seed(hparams['seed']) + random.shuffle(mfa_groups) + args = [{ + 'item': item, 'mfa_input_dir': mfa_input_dir, + 'mfa_group': mfa_group, 'wav_processed_tmp': wav_processed_tmp_dir, + 'preprocess_args': self.preprocess_args + } for item, mfa_group in zip(items, mfa_groups)] + for i, (ph_gb_word_nosil, new_wav_align_fn) in multiprocess_run_tqdm( + self.build_mfa_inputs, args, desc='Build MFA data'): + items[i]['wav_align_fn'] = new_wav_align_fn + for w in ph_gb_word_nosil.split(" "): + mfa_dict.add(f"{w} {w.replace('_', ' ')}") + mfa_dict = sorted(mfa_dict) + with open(f'{processed_dir}/mfa_dict.txt', 'w') as f: + f.writelines([f'{l}\n' for l in mfa_dict]) + with open(f"{processed_dir}/{self.meta_csv_filename}.json", 'w') as f: + f.write(re.sub(r'\n\s+([\d+\]])', r'\1', json.dumps(items, ensure_ascii=False, sort_keys=False, indent=1))) + remove_file(wav_processed_tmp_dir) + + + @classmethod + def preprocess_first_pass(cls, item_name, txt_raw, txt_processor, + wav_fn, wav_processed_dir, wav_processed_tmp, + preprocess_args, txt_loader=None, others=None): + try: + if txt_loader is not None: + txt_raw = txt_loader(txt_raw) + ph, txt, word, ph2word, ph_gb_word = cls.txt_to_ph(txt_processor, txt_raw, preprocess_args) + wav_fn, wav_align_fn = cls.process_wav( + item_name, wav_fn, + hparams['processed_data_dir'], + wav_processed_tmp, preprocess_args) + + # wav for binarization + ext = os.path.splitext(wav_fn)[1] + os.makedirs(wav_processed_dir, exist_ok=True) + new_wav_fn = f"{wav_processed_dir}/{item_name}{ext}" + move_link_func = move_file if os.path.dirname(wav_fn) == wav_processed_tmp else link_file + move_link_func(wav_fn, new_wav_fn) + return { + 'txt': txt, 'txt_raw': txt_raw, 'ph': 
ph, + 'word': word, 'ph2word': ph2word, 'ph_gb_word': ph_gb_word, + 'wav_fn': new_wav_fn, 'wav_align_fn': wav_align_fn, + 'others': others + } + except: + traceback.print_exc() + print(f"| Error is caught. item_name: {item_name}.") + return None + + @staticmethod + def txt_to_ph(txt_processor, txt_raw, preprocess_args): + txt_struct, txt = txt_processor.process(txt_raw, preprocess_args) + ph = [p for w in txt_struct for p in w[1]] + ph_gb_word = ["_".join(w[1]) for w in txt_struct] + words = [w[0] for w in txt_struct] + # word_id=0 is reserved for padding + ph2word = [w_id + 1 for w_id, w in enumerate(txt_struct) for _ in range(len(w[1]))] + return " ".join(ph), txt, " ".join(words), ph2word, " ".join(ph_gb_word) + + @staticmethod + def process_wav(item_name, wav_fn, processed_dir, wav_processed_tmp, preprocess_args): + processors = [get_wav_processor_cls(v) for v in preprocess_args['wav_processors']] + processors = [k() for k in processors if k is not None] + if len(processors) >= 1: + sr_file = librosa.core.get_samplerate(wav_fn) + output_fn_for_align = None + ext = os.path.splitext(wav_fn)[1] + input_fn = f"{wav_processed_tmp}/{item_name}{ext}" + link_file(wav_fn, input_fn) + for p in processors: + outputs = p.process(input_fn, sr_file, wav_processed_tmp, processed_dir, item_name, preprocess_args) + if len(outputs) == 3: + input_fn, sr, output_fn_for_align = outputs + else: + input_fn, sr = outputs + if output_fn_for_align is None: + return input_fn, input_fn + else: + return input_fn, output_fn_for_align + else: + return wav_fn, wav_fn + + def _phone_encoder(self, ph_set): + ph_set_fn = f"{self.processed_dir}/phone_set.json" + if self.preprocess_args['reset_phone_dict'] or not os.path.exists(ph_set_fn): + ph_set = sorted(set(ph_set)) + json.dump(ph_set, open(ph_set_fn, 'w'), ensure_ascii=False) + print("| Build phone set: ", ph_set) + else: + ph_set = json.load(open(ph_set_fn, 'r')) + print("| Load phone set: ", ph_set) + return build_token_encoder(ph_set_fn) + + def _word_encoder(self, word_set): + word_set_fn = f"{self.processed_dir}/word_set.json" + if self.preprocess_args['reset_word_dict']: + word_set = Counter(word_set) + total_words = sum(word_set.values()) + word_set = word_set.most_common(hparams['word_dict_size']) + num_unk_words = total_words - sum([x[1] for x in word_set]) + word_set = ['', ''] + [x[0] for x in word_set] + word_set = sorted(set(word_set)) + json.dump(word_set, open(word_set_fn, 'w'), ensure_ascii=False) + print(f"| Build word set. Size: {len(word_set)}, #total words: {total_words}," + f" #unk_words: {num_unk_words}, word_set[:10]:, {word_set[:10]}.") + else: + word_set = json.load(open(word_set_fn, 'r')) + print("| Load word set. 
Size: ", len(word_set), word_set[:10]) + return build_token_encoder(word_set_fn) + + @classmethod + def preprocess_second_pass(cls, word, ph, spk_name, word_encoder, ph_encoder, spk_map): + word_token = word_encoder.encode(word) + ph_token = ph_encoder.encode(ph) + spk_id = spk_map[spk_name] + return {'word_token': word_token, 'ph_token': ph_token, 'spk_id': spk_id} + + def build_spk_map(self, spk_names): + spk_map = {x: i for i, x in enumerate(sorted(list(spk_names)))} + assert len(spk_map) == 0 or len(spk_map) <= hparams['num_spk'], len(spk_map) + print(f"| Number of spks: {len(spk_map)}, spk_map: {spk_map}") + json.dump(spk_map, open(self.spk_map_fn, 'w'), ensure_ascii=False) + return spk_map + + @classmethod + def build_mfa_inputs(cls, item, mfa_input_dir, mfa_group, wav_processed_tmp, preprocess_args): + item_name = item['item_name'] + wav_align_fn = item['wav_align_fn'] + ph_gb_word = item['ph_gb_word'] + ext = os.path.splitext(wav_align_fn)[1] + mfa_input_group_dir = f'{mfa_input_dir}/{mfa_group}' + os.makedirs(mfa_input_group_dir, exist_ok=True) + new_wav_align_fn = f"{mfa_input_group_dir}/{item_name}{ext}" + move_link_func = move_file if os.path.dirname(wav_align_fn) == wav_processed_tmp else link_file + move_link_func(wav_align_fn, new_wav_align_fn) + ph_gb_word_nosil = " ".join(["_".join([p for p in w.split("_") if not is_sil_phoneme(p)]) + for w in ph_gb_word.split(" ") if not is_sil_phoneme(w)]) + with open(f'{mfa_input_group_dir}/{item_name}.lab', 'w') as f_txt: + f_txt.write(ph_gb_word_nosil) + return ph_gb_word_nosil, new_wav_align_fn + + def load_spk_map(self, base_dir): + spk_map_fn = f"{base_dir}/spk_map.json" + spk_map = json.load(open(spk_map_fn, 'r')) + return spk_map + + def load_dict(self, base_dir): + ph_encoder = build_token_encoder(f'{base_dir}/phone_set.json') + word_encoder = build_token_encoder(f'{base_dir}/word_set.json') + return ph_encoder, word_encoder + + @property + def meta_csv_filename(self): + return 'metadata' + + @property + def wav_processed_dirname(self): + return 'wav_processed' \ No newline at end of file diff --git a/NeuralSeq/data_gen/tts/binarizer_zh.py b/NeuralSeq/data_gen/tts/binarizer_zh.py new file mode 100644 index 0000000000000000000000000000000000000000..7bd424a1a669ecf1a74cb69d690a690d0d39fe55 --- /dev/null +++ b/NeuralSeq/data_gen/tts/binarizer_zh.py @@ -0,0 +1,59 @@ +import os + +os.environ["OMP_NUM_THREADS"] = "1" + +from data_gen.tts.txt_processors.zh_g2pM import ALL_SHENMU +from data_gen.tts.base_binarizer import BaseBinarizer, BinarizationError +from data_gen.tts.data_gen_utils import get_mel2ph +from utils.hparams import set_hparams, hparams +import numpy as np + + +class ZhBinarizer(BaseBinarizer): + @staticmethod + def get_align(tg_fn, ph, mel, phone_encoded, res): + if tg_fn is not None and os.path.exists(tg_fn): + _, dur = get_mel2ph(tg_fn, ph, mel, hparams) + else: + raise BinarizationError(f"Align not found") + ph_list = ph.split(" ") + assert len(dur) == len(ph_list) + mel2ph = [] + # 分隔符的时长分配给韵母 + dur_cumsum = np.pad(np.cumsum(dur), [1, 0], mode='constant', constant_values=0) + for i in range(len(dur)): + p = ph_list[i] + if p[0] != '<' and not p[0].isalpha(): + uv_ = res['f0'][dur_cumsum[i]:dur_cumsum[i + 1]] == 0 + j = 0 + while j < len(uv_) and not uv_[j]: + j += 1 + dur[i - 1] += j + dur[i] -= j + if dur[i] < 100: + dur[i - 1] += dur[i] + dur[i] = 0 + # 声母和韵母等长 + for i in range(len(dur)): + p = ph_list[i] + if p in ALL_SHENMU: + p_next = ph_list[i + 1] + if not (dur[i] > 0 and p_next[0].isalpha() and p_next not in 
ALL_SHENMU): + print(f"assert dur[i] > 0 and p_next[0].isalpha() and p_next not in ALL_SHENMU, " + f"dur[i]: {dur[i]}, p: {p}, p_next: {p_next}.") + continue + total = dur[i + 1] + dur[i] + dur[i] = total // 2 + dur[i + 1] = total - dur[i] + for i in range(len(dur)): + mel2ph += [i + 1] * dur[i] + mel2ph = np.array(mel2ph) + if mel2ph.max() - 1 >= len(phone_encoded): + raise BinarizationError(f"| Align does not match: {(mel2ph.max() - 1, len(phone_encoded))}") + res['mel2ph'] = mel2ph + res['dur'] = dur + + +if __name__ == "__main__": + set_hparams() + ZhBinarizer().process() diff --git a/NeuralSeq/data_gen/tts/data_gen_utils.py b/NeuralSeq/data_gen/tts/data_gen_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..57ccb7c0200de5124908db2cba0347baf2663adc --- /dev/null +++ b/NeuralSeq/data_gen/tts/data_gen_utils.py @@ -0,0 +1,357 @@ +import warnings + +warnings.filterwarnings("ignore") + +import parselmouth +import os +import torch +from skimage.transform import resize +from utils.text_encoder import TokenTextEncoder +from utils.pitch_utils import f0_to_coarse +import struct +import webrtcvad +from scipy.ndimage.morphology import binary_dilation +import librosa +import numpy as np +from utils import audio +import pyloudnorm as pyln +import re +import json +from collections import OrderedDict + +PUNCS = '!,.?;:' + +int16_max = (2 ** 15) - 1 + + +def trim_long_silences(path, sr=None, return_raw_wav=False, norm=True, vad_max_silence_length=12): + """ + Ensures that segments without voice in the waveform remain no longer than a + threshold determined by the VAD parameters in params.py. + :param wav: the raw waveform as a numpy array of floats + :param vad_max_silence_length: Maximum number of consecutive silent frames a segment can have. + :return: the same waveform with silences trimmed away (length <= original wav length) + """ + + ## Voice Activation Detection + # Window size of the VAD. Must be either 10, 20 or 30 milliseconds. + # This sets the granularity of the VAD. Should not need to be changed. + sampling_rate = 16000 + wav_raw, sr = librosa.core.load(path, sr=sr) + + if norm: + meter = pyln.Meter(sr) # create BS.1770 meter + loudness = meter.integrated_loudness(wav_raw) + wav_raw = pyln.normalize.loudness(wav_raw, loudness, -20.0) + if np.abs(wav_raw).max() > 1.0: + wav_raw = wav_raw / np.abs(wav_raw).max() + + wav = librosa.resample(wav_raw, sr, sampling_rate, res_type='kaiser_best') + + vad_window_length = 30 # In milliseconds + # Number of frames to average together when performing the moving average smoothing. + # The larger this value, the larger the VAD variations must be to not get smoothed out. 
+ vad_moving_average_width = 8 + + # Compute the voice detection window size + samples_per_window = (vad_window_length * sampling_rate) // 1000 + + # Trim the end of the audio to have a multiple of the window size + wav = wav[:len(wav) - (len(wav) % samples_per_window)] + + # Convert the float waveform to 16-bit mono PCM + pcm_wave = struct.pack("%dh" % len(wav), *(np.round(wav * int16_max)).astype(np.int16)) + + # Perform voice activation detection + voice_flags = [] + vad = webrtcvad.Vad(mode=3) + for window_start in range(0, len(wav), samples_per_window): + window_end = window_start + samples_per_window + voice_flags.append(vad.is_speech(pcm_wave[window_start * 2:window_end * 2], + sample_rate=sampling_rate)) + voice_flags = np.array(voice_flags) + + # Smooth the voice detection with a moving average + def moving_average(array, width): + array_padded = np.concatenate((np.zeros((width - 1) // 2), array, np.zeros(width // 2))) + ret = np.cumsum(array_padded, dtype=float) + ret[width:] = ret[width:] - ret[:-width] + return ret[width - 1:] / width + + audio_mask = moving_average(voice_flags, vad_moving_average_width) + audio_mask = np.round(audio_mask).astype(np.bool) + + # Dilate the voiced regions + audio_mask = binary_dilation(audio_mask, np.ones(vad_max_silence_length + 1)) + audio_mask = np.repeat(audio_mask, samples_per_window) + audio_mask = resize(audio_mask, (len(wav_raw),)) > 0 + if return_raw_wav: + return wav_raw, audio_mask, sr + return wav_raw[audio_mask], audio_mask, sr + + +def process_utterance(wav_path, + fft_size=1024, + hop_size=256, + win_length=1024, + window="hann", + num_mels=80, + fmin=80, + fmax=7600, + eps=1e-6, + sample_rate=22050, + loud_norm=False, + min_level_db=-100, + return_linear=False, + trim_long_sil=False, vocoder='pwg'): + if isinstance(wav_path, str): + if trim_long_sil: + wav, _, _ = trim_long_silences(wav_path, sample_rate) + else: + wav, _ = librosa.core.load(wav_path, sr=sample_rate) + else: + wav = wav_path + + if loud_norm: + meter = pyln.Meter(sample_rate) # create BS.1770 meter + loudness = meter.integrated_loudness(wav) + wav = pyln.normalize.loudness(wav, loudness, -22.0) + if np.abs(wav).max() > 1: + wav = wav / np.abs(wav).max() + + # get amplitude spectrogram + x_stft = librosa.stft(wav, n_fft=fft_size, hop_length=hop_size, + win_length=win_length, window=window, pad_mode="constant") + spc = np.abs(x_stft) # (n_bins, T) + + # get mel basis + fmin = 0 if fmin == -1 else fmin + fmax = sample_rate / 2 if fmax == -1 else fmax + mel_basis = librosa.filters.mel(sample_rate, fft_size, num_mels, fmin, fmax) + mel = mel_basis @ spc + + if vocoder == 'pwg': + mel = np.log10(np.maximum(eps, mel)) # (n_mel_bins, T) + else: + assert False, f'"{vocoder}" is not in ["pwg"].' 
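+    # Compatibility note: librosa.filters.mel above (and librosa.resample in
+    # trim_long_silences) is called with positional arguments, which librosa >= 0.10
+    # rejects; with newer librosa pass keywords instead, e.g.
+    # librosa.filters.mel(sr=sample_rate, n_fft=fft_size, n_mels=num_mels, fmin=fmin, fmax=fmax).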
+ + l_pad, r_pad = audio.librosa_pad_lr(wav, fft_size, hop_size, 1) + wav = np.pad(wav, (l_pad, r_pad), mode='constant', constant_values=0.0) + wav = wav[:mel.shape[1] * hop_size] + + if not return_linear: + return wav, mel + else: + spc = audio.amp_to_db(spc) + spc = audio.normalize(spc, {'min_level_db': min_level_db}) + return wav, mel, spc + + +def get_pitch(wav_data, mel, hparams): + """ + + :param wav_data: [T] + :param mel: [T, 80] + :param hparams: + :return: + """ + time_step = hparams['hop_size'] / hparams['audio_sample_rate'] * 1000 + f0_min = 80 + f0_max = 750 + + if hparams['hop_size'] == 128: + pad_size = 4 + elif hparams['hop_size'] == 256: + pad_size = 2 + else: + assert False + + f0 = parselmouth.Sound(wav_data, hparams['audio_sample_rate']).to_pitch_ac( + time_step=time_step / 1000, voicing_threshold=0.6, + pitch_floor=f0_min, pitch_ceiling=f0_max).selected_array['frequency'] + lpad = pad_size * 2 + rpad = len(mel) - len(f0) - lpad + f0 = np.pad(f0, [[lpad, rpad]], mode='constant') + # mel and f0 are extracted by 2 different libraries. we should force them to have the same length. + # Attention: we find that new version of some libraries could cause ``rpad'' to be a negetive value... + # Just to be sure, we recommend users to set up the same environments as them in requirements_auto.txt (by Anaconda) + delta_l = len(mel) - len(f0) + assert np.abs(delta_l) <= 8 + if delta_l > 0: + f0 = np.concatenate([f0, [f0[-1]] * delta_l], 0) + f0 = f0[:len(mel)] + pitch_coarse = f0_to_coarse(f0) + return f0, pitch_coarse + + +def remove_empty_lines(text): + """remove empty lines""" + assert (len(text) > 0) + assert (isinstance(text, list)) + text = [t.strip() for t in text] + if "" in text: + text.remove("") + return text + + +class TextGrid(object): + def __init__(self, text): + text = remove_empty_lines(text) + self.text = text + self.line_count = 0 + self._get_type() + self._get_time_intval() + self._get_size() + self.tier_list = [] + self._get_item_list() + + def _extract_pattern(self, pattern, inc): + """ + Parameters + ---------- + pattern : regex to extract pattern + inc : increment of line count after extraction + Returns + ------- + group : extracted info + """ + try: + group = re.match(pattern, self.text[self.line_count]).group(1) + self.line_count += inc + except AttributeError: + raise ValueError("File format error at line %d:%s" % (self.line_count, self.text[self.line_count])) + return group + + def _get_type(self): + self.file_type = self._extract_pattern(r"File type = \"(.*)\"", 2) + + def _get_time_intval(self): + self.xmin = self._extract_pattern(r"xmin = (.*)", 1) + self.xmax = self._extract_pattern(r"xmax = (.*)", 2) + + def _get_size(self): + self.size = int(self._extract_pattern(r"size = (.*)", 2)) + + def _get_item_list(self): + """Only supports IntervalTier currently""" + for itemIdx in range(1, self.size + 1): + tier = OrderedDict() + item_list = [] + tier_idx = self._extract_pattern(r"item \[(.*)\]:", 1) + tier_class = self._extract_pattern(r"class = \"(.*)\"", 1) + if tier_class != "IntervalTier": + raise NotImplementedError("Only IntervalTier class is supported currently") + tier_name = self._extract_pattern(r"name = \"(.*)\"", 1) + tier_xmin = self._extract_pattern(r"xmin = (.*)", 1) + tier_xmax = self._extract_pattern(r"xmax = (.*)", 1) + tier_size = self._extract_pattern(r"intervals: size = (.*)", 1) + for i in range(int(tier_size)): + item = OrderedDict() + item["idx"] = self._extract_pattern(r"intervals \[(.*)\]", 1) + item["xmin"] = 
self._extract_pattern(r"xmin = (.*)", 1) + item["xmax"] = self._extract_pattern(r"xmax = (.*)", 1) + item["text"] = self._extract_pattern(r"text = \"(.*)\"", 1) + item_list.append(item) + tier["idx"] = tier_idx + tier["class"] = tier_class + tier["name"] = tier_name + tier["xmin"] = tier_xmin + tier["xmax"] = tier_xmax + tier["size"] = tier_size + tier["items"] = item_list + self.tier_list.append(tier) + + def toJson(self): + _json = OrderedDict() + _json["file_type"] = self.file_type + _json["xmin"] = self.xmin + _json["xmax"] = self.xmax + _json["size"] = self.size + _json["tiers"] = self.tier_list + return json.dumps(_json, ensure_ascii=False, indent=2) + + +def get_mel2ph(tg_fn, ph, mel, hparams): + ph_list = ph.split(" ") + with open(tg_fn, "r") as f: + tg = f.readlines() + tg = remove_empty_lines(tg) + tg = TextGrid(tg) + tg = json.loads(tg.toJson()) + split = np.ones(len(ph_list) + 1, np.float) * -1 + tg_idx = 0 + ph_idx = 0 + tg_align = [x for x in tg['tiers'][-1]['items']] + tg_align_ = [] + for x in tg_align: + x['xmin'] = float(x['xmin']) + x['xmax'] = float(x['xmax']) + if x['text'] in ['sil', 'sp', '', 'SIL', 'PUNC']: + x['text'] = '' + if len(tg_align_) > 0 and tg_align_[-1]['text'] == '': + tg_align_[-1]['xmax'] = x['xmax'] + continue + tg_align_.append(x) + tg_align = tg_align_ + tg_len = len([x for x in tg_align if x['text'] != '']) + ph_len = len([x for x in ph_list if not is_sil_phoneme(x)]) + assert tg_len == ph_len, (tg_len, ph_len, tg_align, ph_list, tg_fn) + while tg_idx < len(tg_align) or ph_idx < len(ph_list): + if tg_idx == len(tg_align) and is_sil_phoneme(ph_list[ph_idx]): + split[ph_idx] = 1e8 + ph_idx += 1 + continue + x = tg_align[tg_idx] + if x['text'] == '' and ph_idx == len(ph_list): + tg_idx += 1 + continue + assert ph_idx < len(ph_list), (tg_len, ph_len, tg_align, ph_list, tg_fn) + ph = ph_list[ph_idx] + if x['text'] == '' and not is_sil_phoneme(ph): + assert False, (ph_list, tg_align) + if x['text'] != '' and is_sil_phoneme(ph): + ph_idx += 1 + else: + assert (x['text'] == '' and is_sil_phoneme(ph)) \ + or x['text'].lower() == ph.lower() \ + or x['text'].lower() == 'sil', (x['text'], ph) + split[ph_idx] = x['xmin'] + if ph_idx > 0 and split[ph_idx - 1] == -1 and is_sil_phoneme(ph_list[ph_idx - 1]): + split[ph_idx - 1] = split[ph_idx] + ph_idx += 1 + tg_idx += 1 + assert tg_idx == len(tg_align), (tg_idx, [x['text'] for x in tg_align]) + assert ph_idx >= len(ph_list) - 1, (ph_idx, ph_list, len(ph_list), [x['text'] for x in tg_align], tg_fn) + mel2ph = np.zeros([mel.shape[0]], np.int) + split[0] = 0 + split[-1] = 1e8 + for i in range(len(split) - 1): + assert split[i] != -1 and split[i] <= split[i + 1], (split[:-1],) + split = [int(s * hparams['audio_sample_rate'] / hparams['hop_size'] + 0.5) for s in split] + for ph_idx in range(len(ph_list)): + mel2ph[split[ph_idx]:split[ph_idx + 1]] = ph_idx + 1 + mel2ph_torch = torch.from_numpy(mel2ph) + T_t = len(ph_list) + dur = mel2ph_torch.new_zeros([T_t + 1]).scatter_add(0, mel2ph_torch, torch.ones_like(mel2ph_torch)) + dur = dur[1:].numpy() + return mel2ph, dur + + +def build_phone_encoder(data_dir): + phone_list_file = os.path.join(data_dir, 'phone_set.json') + phone_list = json.load(open(phone_list_file)) + return TokenTextEncoder(None, vocab_list=phone_list, replace_oov=',') + + +def build_word_encoder(data_dir): + word_list_file = os.path.join(data_dir, 'word_set.json') + word_list = json.load(open(word_list_file)) + return TokenTextEncoder(None, vocab_list=word_list, replace_oov=',') + +def is_sil_phoneme(p): 
+ return not p[0].isalpha() + + +def build_token_encoder(token_list_file): + token_list = json.load(open(token_list_file)) + return TokenTextEncoder(None, vocab_list=token_list, replace_oov='') diff --git a/NeuralSeq/data_gen/tts/emotion/__pycache__/audio.cpython-38.pyc b/NeuralSeq/data_gen/tts/emotion/__pycache__/audio.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..43f7ae5d1841b4e6b80652d46ef9b85920319179 Binary files /dev/null and b/NeuralSeq/data_gen/tts/emotion/__pycache__/audio.cpython-38.pyc differ diff --git a/NeuralSeq/data_gen/tts/emotion/__pycache__/inference.cpython-38.pyc b/NeuralSeq/data_gen/tts/emotion/__pycache__/inference.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..39fd3fcfce30afa3e8cacd24ed124a0e64300e86 Binary files /dev/null and b/NeuralSeq/data_gen/tts/emotion/__pycache__/inference.cpython-38.pyc differ diff --git a/NeuralSeq/data_gen/tts/emotion/__pycache__/model.cpython-38.pyc b/NeuralSeq/data_gen/tts/emotion/__pycache__/model.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..80dce017ae64c9265602e206e3f5f429df0b27e2 Binary files /dev/null and b/NeuralSeq/data_gen/tts/emotion/__pycache__/model.cpython-38.pyc differ diff --git a/NeuralSeq/data_gen/tts/emotion/__pycache__/params_data.cpython-38.pyc b/NeuralSeq/data_gen/tts/emotion/__pycache__/params_data.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b5eb696abf3900b3b6af1b996dc9be6e1cd37188 Binary files /dev/null and b/NeuralSeq/data_gen/tts/emotion/__pycache__/params_data.cpython-38.pyc differ diff --git a/NeuralSeq/data_gen/tts/emotion/__pycache__/params_model.cpython-38.pyc b/NeuralSeq/data_gen/tts/emotion/__pycache__/params_model.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d7b679b2370b42002c5aade745dbce07af9de027 Binary files /dev/null and b/NeuralSeq/data_gen/tts/emotion/__pycache__/params_model.cpython-38.pyc differ diff --git a/NeuralSeq/data_gen/tts/emotion/audio.py b/NeuralSeq/data_gen/tts/emotion/audio.py new file mode 100644 index 0000000000000000000000000000000000000000..2a5eed96f08995c4f7e872a0f28a02bf0f0ad4ab --- /dev/null +++ b/NeuralSeq/data_gen/tts/emotion/audio.py @@ -0,0 +1,107 @@ +from scipy.ndimage.morphology import binary_dilation +from data_gen.tts.emotion.params_data import * +from pathlib import Path +from typing import Optional, Union +import numpy as np +import webrtcvad +import librosa +import struct + +int16_max = (2 ** 15) - 1 + + +def preprocess_wav(fpath_or_wav: Union[str, Path, np.ndarray], + source_sr: Optional[int] = None): + """ + Applies the preprocessing operations used in training the Speaker Encoder to a waveform + either on disk or in memory. The waveform will be resampled to match the data hyperparameters. + + :param fpath_or_wav: either a filepath to an audio file (many extensions are supported, not + just .wav), either the waveform as a numpy array of floats. + :param source_sr: if passing an audio waveform, the sampling rate of the waveform before + preprocessing. After preprocessing, the waveform's sampling rate will match the data + hyperparameters. If passing a filepath, the sampling rate will be automatically detected and + this argument will be ignored. 
+ """ + # Load the wav from disk if needed + if isinstance(fpath_or_wav, str) or isinstance(fpath_or_wav, Path): + wav, source_sr = librosa.load(str(fpath_or_wav), sr=None) + else: + wav = fpath_or_wav + + # Resample the wav if needed + if source_sr is not None and source_sr != sampling_rate: + wav = librosa.resample(wav, source_sr, sampling_rate) + + # Apply the preprocessing: normalize volume and shorten long silences + wav = normalize_volume(wav, audio_norm_target_dBFS, increase_only=True) + wav = trim_long_silences(wav) + + return wav + + +def wav_to_mel_spectrogram(wav): + """ + Derives a mel spectrogram ready to be used by the encoder from a preprocessed audio waveform. + Note: this not a log-mel spectrogram. + """ + frames = librosa.feature.melspectrogram( + wav, + sampling_rate, + n_fft=int(sampling_rate * mel_window_length / 1000), + hop_length=int(sampling_rate * mel_window_step / 1000), + n_mels=mel_n_channels + ) + return frames.astype(np.float32).T + + +def trim_long_silences(wav): + """ + Ensures that segments without voice in the waveform remain no longer than a + threshold determined by the VAD parameters in params.py. + + :param wav: the raw waveform as a numpy array of floats + :return: the same waveform with silences trimmed away (length <= original wav length) + """ + # Compute the voice detection window size + samples_per_window = (vad_window_length * sampling_rate) // 1000 + + # Trim the end of the audio to have a multiple of the window size + wav = wav[:len(wav) - (len(wav) % samples_per_window)] + + # Convert the float waveform to 16-bit mono PCM + pcm_wave = struct.pack("%dh" % len(wav), *(np.round(wav * int16_max)).astype(np.int16)) + + # Perform voice activation detection + voice_flags = [] + vad = webrtcvad.Vad(mode=3) + for window_start in range(0, len(wav), samples_per_window): + window_end = window_start + samples_per_window + voice_flags.append(vad.is_speech(pcm_wave[window_start * 2:window_end * 2], + sample_rate=sampling_rate)) + voice_flags = np.array(voice_flags) + + # Smooth the voice detection with a moving average + def moving_average(array, width): + array_padded = np.concatenate((np.zeros((width - 1) // 2), array, np.zeros(width // 2))) + ret = np.cumsum(array_padded, dtype=float) + ret[width:] = ret[width:] - ret[:-width] + return ret[width - 1:] / width + + audio_mask = moving_average(voice_flags, vad_moving_average_width) + audio_mask = np.round(audio_mask).astype(np.bool) + + # Dilate the voiced regions + audio_mask = binary_dilation(audio_mask, np.ones(vad_max_silence_length + 1)) + audio_mask = np.repeat(audio_mask, samples_per_window) + + return wav[audio_mask == True] + + +def normalize_volume(wav, target_dBFS, increase_only=False, decrease_only=False): + if increase_only and decrease_only: + raise ValueError("Both increase only and decrease only are set") + dBFS_change = target_dBFS - 10 * np.log10(np.mean(wav ** 2)) + if (dBFS_change < 0 and increase_only) or (dBFS_change > 0 and decrease_only): + return wav + return wav * (10 ** (dBFS_change / 20)) diff --git a/NeuralSeq/data_gen/tts/emotion/inference.py b/NeuralSeq/data_gen/tts/emotion/inference.py new file mode 100644 index 0000000000000000000000000000000000000000..0bda414e67e4f2a6e829930c85352a0a41a7f6d9 --- /dev/null +++ b/NeuralSeq/data_gen/tts/emotion/inference.py @@ -0,0 +1,177 @@ +from data_gen.tts.emotion.params_data import * +from data_gen.tts.emotion.model import EmotionEncoder +from data_gen.tts.emotion.audio import preprocess_wav # We want to expose this function from here 
+from matplotlib import cm +from data_gen.tts.emotion import audio +from pathlib import Path +import matplotlib.pyplot as plt +import numpy as np +import torch + +_model = None # type: EmotionEncoder +_device = None # type: torch.device + + +def load_model(weights_fpath: Path, device=None): + """ + Loads the model in memory. If this function is not explicitely called, it will be run on the + first call to embed_frames() with the default weights file. + + :param weights_fpath: the path to saved model weights. + :param device: either a torch device or the name of a torch device (e.g. "cpu", "cuda"). The + model will be loaded and will run on this device. Outputs will however always be on the cpu. + If None, will default to your GPU if it"s available, otherwise your CPU. + """ + # TODO: I think the slow loading of the encoder might have something to do with the device it + # was saved on. Worth investigating. + global _model, _device + if device is None: + _device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + elif isinstance(device, str): + _device = torch.device(device) + _model = EmotionEncoder(_device, torch.device("cpu")) + checkpoint = torch.load(weights_fpath) + _model.load_state_dict(checkpoint["model_state"]) + _model.eval() + print("Loaded encoder trained to step %d" % (checkpoint["step"])) + + +def is_loaded(): + return _model is not None + + +def embed_frames_batch(frames_batch): + """ + Computes embeddings for a batch of mel spectrogram. + + :param frames_batch: a batch mel of spectrogram as a numpy array of float32 of shape + (batch_size, n_frames, n_channels) + :return: the embeddings as a numpy array of float32 of shape (batch_size, model_embedding_size) + """ + if _model is None: + raise Exception("Model was not loaded. Call load_model() before inference.") + + frames = torch.from_numpy(frames_batch).to(_device) + embed = _model.inference(frames).detach().cpu().numpy() + return embed + + +def compute_partial_slices(n_samples, partial_utterance_n_frames=partials_n_frames, + min_pad_coverage=0.75, overlap=0.5): + """ + Computes where to split an utterance waveform and its corresponding mel spectrogram to obtain + partial utterances of each. Both the waveform and the mel + spectrogram slices are returned, so as to make each partial utterance waveform correspond to + its spectrogram. This function assumes that the mel spectrogram parameters used are those + defined in params_data.py. + + The returned ranges may be indexing further than the length of the waveform. It is + recommended that you pad the waveform with zeros up to wave_slices[-1].stop. + + :param n_samples: the number of samples in the waveform + :param partial_utterance_n_frames: the number of mel spectrogram frames in each partial + utterance + :param min_pad_coverage: when reaching the last partial utterance, it may or may not have + enough frames. If at least of are present, + then the last partial utterance will be considered, as if we padded the audio. Otherwise, + it will be discarded, as if we trimmed the audio. If there aren't enough frames for 1 partial + utterance, this parameter is ignored so that the function always returns at least 1 slice. + :param overlap: by how much the partial utterance should overlap. If set to 0, the partial + utterances are entirely disjoint. + :return: the waveform slices and mel spectrogram slices as lists of array slices. Index + respectively the waveform and the mel spectrogram with these slices to obtain the partial + utterances. 
+ """ + assert 0 <= overlap < 1 + assert 0 < min_pad_coverage <= 1 + + samples_per_frame = int((sampling_rate * mel_window_step / 1000)) + n_frames = int(np.ceil((n_samples + 1) / samples_per_frame)) + frame_step = max(int(np.round(partial_utterance_n_frames * (1 - overlap))), 1) + + # Compute the slices + wav_slices, mel_slices = [], [] + steps = max(1, n_frames - partial_utterance_n_frames + frame_step + 1) + for i in range(0, steps, frame_step): + mel_range = np.array([i, i + partial_utterance_n_frames]) + wav_range = mel_range * samples_per_frame + mel_slices.append(slice(*mel_range)) + wav_slices.append(slice(*wav_range)) + + # Evaluate whether extra padding is warranted or not + last_wav_range = wav_slices[-1] + coverage = (n_samples - last_wav_range.start) / (last_wav_range.stop - last_wav_range.start) + if coverage < min_pad_coverage and len(mel_slices) > 1: + mel_slices = mel_slices[:-1] + wav_slices = wav_slices[:-1] + + return wav_slices, mel_slices + + +def embed_utterance(wav, using_partials=True, return_partials=False, **kwargs): + """ + Computes an embedding for a single utterance. + + # TODO: handle multiple wavs to benefit from batching on GPU + :param wav: a preprocessed (see audio.py) utterance waveform as a numpy array of float32 + :param using_partials: if True, then the utterance is split in partial utterances of + frames and the utterance embedding is computed from their + normalized average. If False, the utterance is instead computed from feeding the entire + spectogram to the network. + :param return_partials: if True, the partial embeddings will also be returned along with the + wav slices that correspond to the partial embeddings. + :param kwargs: additional arguments to compute_partial_splits() + :return: the embedding as a numpy array of float32 of shape (model_embedding_size,). If + is True, the partial utterances as a numpy array of float32 of shape + (n_partials, model_embedding_size) and the wav partials as a list of slices will also be + returned. If is simultaneously set to False, both these values will be None + instead. 
+ """ + # Process the entire utterance if not using partials + if not using_partials: + frames = audio.wav_to_mel_spectrogram(wav) + embed = embed_frames_batch(frames[None, ...])[0] + if return_partials: + return embed, None, None + return embed + + # Compute where to split the utterance into partials and pad if necessary + wave_slices, mel_slices = compute_partial_slices(len(wav), **kwargs) + max_wave_length = wave_slices[-1].stop + if max_wave_length >= len(wav): + wav = np.pad(wav, (0, max_wave_length - len(wav)), "constant") + + # Split the utterance into partials + frames = audio.wav_to_mel_spectrogram(wav) + frames_batch = np.array([frames[s] for s in mel_slices]) + partial_embeds = embed_frames_batch(frames_batch) + + # Compute the utterance embedding from the partial embeddings + raw_embed = np.mean(partial_embeds, axis=0) + embed = raw_embed / np.linalg.norm(raw_embed, 2) + + if return_partials: + return embed, partial_embeds, wave_slices + return embed + + +def embed_speaker(wavs, **kwargs): + raise NotImplemented() + + +def plot_embedding_as_heatmap(embed, ax=None, title="", shape=None, color_range=(0, 0.30)): + if ax is None: + ax = plt.gca() + + if shape is None: + height = int(np.sqrt(len(embed))) + shape = (height, -1) + embed = embed.reshape(shape) + + cmap = cm.get_cmap() + mappable = ax.imshow(embed, cmap=cmap) + cbar = plt.colorbar(mappable, ax=ax, fraction=0.046, pad=0.04) + cbar.set_clim(*color_range) + + ax.set_xticks([]), ax.set_yticks([]) + ax.set_title(title) diff --git a/NeuralSeq/data_gen/tts/emotion/model.py b/NeuralSeq/data_gen/tts/emotion/model.py new file mode 100644 index 0000000000000000000000000000000000000000..63dbae5a5f38d370466536646a55defd6df7de3d --- /dev/null +++ b/NeuralSeq/data_gen/tts/emotion/model.py @@ -0,0 +1,78 @@ + +from data_gen.tts.emotion.params_model import * +from data_gen.tts.emotion.params_data import * +from torch.nn.utils import clip_grad_norm_ +from scipy.optimize import brentq +from torch import nn +import numpy as np +import torch + + +class EmotionEncoder(nn.Module): + def __init__(self, device, loss_device): + super().__init__() + self.loss_device = loss_device + + # Network defition + self.lstm = nn.LSTM(input_size=mel_n_channels, + hidden_size=model_hidden_size, + num_layers=model_num_layers, + batch_first=True).to(device) + self.linear = nn.Linear(in_features=model_hidden_size, + out_features=model_embedding_size).to(device) + self.relu = torch.nn.ReLU().to(device) + + + # Cosine similarity scaling (with fixed initial parameter values) + self.similarity_weight = nn.Parameter(torch.tensor([10.])).to(loss_device) + self.similarity_bias = nn.Parameter(torch.tensor([-5.])).to(loss_device) + + # Loss + self.loss_fn = nn.CrossEntropyLoss().to(loss_device) + + def do_gradient_ops(self): + # Gradient scale + self.similarity_weight.grad *= 0.01 + self.similarity_bias.grad *= 0.01 + + # Gradient clipping + clip_grad_norm_(self.parameters(), 3, norm_type=2) + + def forward(self, utterances, hidden_init=None): + """ + Computes the embeddings of a batch of utterance spectrograms. + + :param utterances: batch of mel-scale filterbanks of same duration as a tensor of shape + (batch_size, n_frames, n_channels) + :param hidden_init: initial hidden state of the LSTM as a tensor of shape (num_layers, + batch_size, hidden_size). Will default to a tensor of zeros if None. 
+ :return: the embeddings as a tensor of shape (batch_size, embedding_size) + """ + # Pass the input through the LSTM layers and retrieve all outputs, the final hidden state + # and the final cell state. + out, (hidden, cell) = self.lstm(utterances, hidden_init) + + # We take only the hidden state of the last layer + embeds_raw = self.relu(self.linear(hidden[-1])) + + # L2-normalize it + embeds = embeds_raw / torch.norm(embeds_raw, dim=1, keepdim=True) + + return embeds + + def inference(self, utterances, hidden_init=None): + """ + Computes the embeddings of a batch of utterance spectrograms. + + :param utterances: batch of mel-scale filterbanks of same duration as a tensor of shape + (batch_size, n_frames, n_channels) + :param hidden_init: initial hidden state of the LSTM as a tensor of shape (num_layers, + batch_size, hidden_size). Will default to a tensor of zeros if None. + :return: the embeddings as a tensor of shape (batch_size, embedding_size) + """ + # Pass the input through the LSTM layers and retrieve all outputs, the final hidden state + # and the final cell state. + + out, (hidden, cell) = self.lstm(utterances, hidden_init) + + return hidden[-1] \ No newline at end of file diff --git a/NeuralSeq/data_gen/tts/emotion/params_data.py b/NeuralSeq/data_gen/tts/emotion/params_data.py new file mode 100644 index 0000000000000000000000000000000000000000..bdb1716ed45617f2b127a7fb8885afe6cc74fb71 --- /dev/null +++ b/NeuralSeq/data_gen/tts/emotion/params_data.py @@ -0,0 +1,29 @@ + +## Mel-filterbank +mel_window_length = 25 # In milliseconds +mel_window_step = 10 # In milliseconds +mel_n_channels = 40 + + +## Audio +sampling_rate = 16000 +# Number of spectrogram frames in a partial utterance +partials_n_frames = 160 # 1600 ms +# Number of spectrogram frames at inference +inference_n_frames = 80 # 800 ms + + +## Voice Activation Detection +# Window size of the VAD. Must be either 10, 20 or 30 milliseconds. +# This sets the granularity of the VAD. Should not need to be changed. +vad_window_length = 30 # In milliseconds +# Number of frames to average together when performing the moving average smoothing. +# The larger this value, the larger the VAD variations must be to not get smoothed out. +vad_moving_average_width = 8 +# Maximum number of consecutive silent frames a segment can have. +vad_max_silence_length = 6 + + +## Audio volume normalization +audio_norm_target_dBFS = -30 + diff --git a/NeuralSeq/data_gen/tts/emotion/params_model.py b/NeuralSeq/data_gen/tts/emotion/params_model.py new file mode 100644 index 0000000000000000000000000000000000000000..48f8e564e772649e3207c7a90bff1bee9e6b3a47 --- /dev/null +++ b/NeuralSeq/data_gen/tts/emotion/params_model.py @@ -0,0 +1,11 @@ + +## Model parameters +model_hidden_size = 256 +model_embedding_size = 256 +model_num_layers = 3 + + +## Training parameters +learning_rate_init = 1e-4 +speakers_per_batch = 6 +utterances_per_speaker = 20 diff --git a/NeuralSeq/data_gen/tts/emotion/test_emotion.py b/NeuralSeq/data_gen/tts/emotion/test_emotion.py new file mode 100644 index 0000000000000000000000000000000000000000..97d702e2b426e0e9e8fb821a8f91183cf7631224 --- /dev/null +++ b/NeuralSeq/data_gen/tts/emotion/test_emotion.py @@ -0,0 +1,184 @@ +#!/usr/bin/env python3 -u +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +""" +Run inference for pre-processed data with a trained model. 
+""" + +import logging +import math +import numpy, math, pdb, sys, random +import time, os, itertools, shutil, importlib +import argparse +import os +import sys +import glob +from sklearn import metrics +import soundfile as sf +#import sentencepiece as spm +import torch +import inference as encoder +import torch.nn as nn +import torch.nn.functional as F +from pathlib import Path +logger = logging.getLogger(__name__) +logger.setLevel(logging.INFO) +from resemblyzer import VoiceEncoder, preprocess_wav + + +def tuneThresholdfromScore(scores, labels, target_fa, target_fr=None): + fpr, tpr, thresholds = metrics.roc_curve(labels, scores, pos_label=1) + fnr = 1 - tpr + + fnr = fnr * 100 + fpr = fpr * 100 + + tunedThreshold = []; + if target_fr: + for tfr in target_fr: + idx = numpy.nanargmin(numpy.absolute((tfr - fnr))) + tunedThreshold.append([thresholds[idx], fpr[idx], fnr[idx]]); + + for tfa in target_fa: + idx = numpy.nanargmin(numpy.absolute((tfa - fpr))) # numpy.where(fpr<=tfa)[0][-1] + tunedThreshold.append([thresholds[idx], fpr[idx], fnr[idx]]); + + idxE = numpy.nanargmin(numpy.absolute((fnr - fpr))) + eer = max(fpr[idxE], fnr[idxE]) + + return (tunedThreshold, eer, fpr, fnr); + + +def loadWAV(filename, max_frames, evalmode=True, num_eval=10): + # Maximum audio length + max_audio = max_frames * 160 + 240 + + # Read wav file and convert to torch tensor + audio,sample_rate = sf.read(filename) + + feats_v0 = torch.from_numpy(audio).float() + audiosize = audio.shape[0] + + if audiosize <= max_audio: + shortage = math.floor((max_audio - audiosize + 1) / 2) + audio = numpy.pad(audio, (shortage, shortage), 'constant', constant_values=0) + audiosize = audio.shape[0] + + if evalmode: + startframe = numpy.linspace(0, audiosize - max_audio, num=num_eval) + else: + startframe = numpy.array([numpy.int64(random.random() * (audiosize - max_audio))]) + feats = [] + if evalmode and max_frames == 0: + feats.append(audio) + else: + for asf in startframe: + feats.append(audio[int(asf):int(asf) + max_audio]) + feat = numpy.stack(feats, axis=0) + feat = torch.FloatTensor(feat) + return feat; + +def evaluateFromList(listfilename, print_interval=100, test_path='', multi=False): + + lines = [] + files = [] + feats = {} + tstart = time.time() + + ## Read all lines + with open(listfilename) as listfile: + while True: + line = listfile.readline(); + if (not line): + break; + + data = line.split(); + + ## Append random label if missing + if len(data) == 2: data = [random.randint(0,1)] + data + + files.append(data[1]) + files.append(data[2]) + lines.append(line) + + setfiles = list(set(files)) + setfiles.sort() + ## Save all features to file + for idx, file in enumerate(setfiles): + # preprocessed_wav = encoder.preprocess_wav(os.path.join(test_path,file)) + # embed = encoder.embed_utterance(preprocessed_wav) + processed_wav = preprocess_wav(os.path.join(test_path,file)) + embed = voice_encoder.embed_utterance(processed_wav) + + torch.cuda.empty_cache() + ref_feat = torch.from_numpy(embed).unsqueeze(0) + + feats[file] = ref_feat + + telapsed = time.time() - tstart + + if idx % print_interval == 0: + sys.stdout.write("\rReading %d of %d: %.2f Hz, embedding size %d"%(idx,len(setfiles),idx/telapsed,ref_feat.size()[1])); + + print('') + all_scores = []; + all_labels = []; + all_trials = []; + tstart = time.time() + + ## Read files and compute all scores + for idx, line in enumerate(lines): + + data = line.split(); + ## Append random label if missing + if len(data) == 2: data = [random.randint(0,1)] + data + + ref_feat = 
feats[data[1]] + com_feat = feats[data[2]] + ref_feat = ref_feat.cuda() + com_feat = com_feat.cuda() + # normalize feats + ref_feat = F.normalize(ref_feat, p=2, dim=1) + com_feat = F.normalize(com_feat, p=2, dim=1) + + dist = F.pairwise_distance(ref_feat.unsqueeze(-1), com_feat.unsqueeze(-1)).detach().cpu().numpy(); + + score = -1 * numpy.mean(dist); + + all_scores.append(score); + all_labels.append(int(data[0])); + all_trials.append(data[1]+" "+data[2]) + + if idx % print_interval == 0: + telapsed = time.time() - tstart + sys.stdout.write("\rComputing %d of %d: %.2f Hz"%(idx,len(lines),idx/telapsed)); + sys.stdout.flush(); + + print('\n') + + return (all_scores, all_labels, all_trials); + + + +if __name__ == '__main__': + + parser = argparse.ArgumentParser("baseline") + parser.add_argument("--data_root", type=str, help="", required=True) + parser.add_argument("--list", type=str, help="", required=True) + parser.add_argument("--model_dir", type=str, help="model parameters for AudioEncoder", required=True) + + args = parser.parse_args() + + + # Load the models one by one. + print("Preparing the encoder...") + # encoder.load_model(Path(args.model_dir)) + print("Insert the wav file name...") + voice_encoder = VoiceEncoder().cuda() + + sc, lab, trials = evaluateFromList(args.list, print_interval=100, test_path=args.data_root) + result = tuneThresholdfromScore(sc, lab, [1, 0.1]); + print('EER %2.4f'%result[1]) diff --git a/NeuralSeq/data_gen/tts/txt_processors/__init__.py b/NeuralSeq/data_gen/tts/txt_processors/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7bff3e9af7d634363116c6605f22a52aad614dea --- /dev/null +++ b/NeuralSeq/data_gen/tts/txt_processors/__init__.py @@ -0,0 +1 @@ +from . import en \ No newline at end of file diff --git a/NeuralSeq/data_gen/tts/txt_processors/__pycache__/__init__.cpython-38.pyc b/NeuralSeq/data_gen/tts/txt_processors/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dfadb1d8e3c9b4f1fa86618338afee2a7f226414 Binary files /dev/null and b/NeuralSeq/data_gen/tts/txt_processors/__pycache__/__init__.cpython-38.pyc differ diff --git a/NeuralSeq/data_gen/tts/txt_processors/__pycache__/base_text_processor.cpython-38.pyc b/NeuralSeq/data_gen/tts/txt_processors/__pycache__/base_text_processor.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f10e03f97d5e3287538681ef1a113f3b3a8f21b9 Binary files /dev/null and b/NeuralSeq/data_gen/tts/txt_processors/__pycache__/base_text_processor.cpython-38.pyc differ diff --git a/NeuralSeq/data_gen/tts/txt_processors/__pycache__/en.cpython-38.pyc b/NeuralSeq/data_gen/tts/txt_processors/__pycache__/en.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..40d91e96ee4417bef57c5922ad9f5f359c71bd54 Binary files /dev/null and b/NeuralSeq/data_gen/tts/txt_processors/__pycache__/en.cpython-38.pyc differ diff --git a/NeuralSeq/data_gen/tts/txt_processors/base_text_processor.py b/NeuralSeq/data_gen/tts/txt_processors/base_text_processor.py new file mode 100644 index 0000000000000000000000000000000000000000..69d51201dcb191c1c208ae1c87a34b5c97e6307f --- /dev/null +++ b/NeuralSeq/data_gen/tts/txt_processors/base_text_processor.py @@ -0,0 +1,47 @@ +from data_gen.tts.data_gen_utils import is_sil_phoneme + +REGISTERED_TEXT_PROCESSORS = {} + +def register_txt_processors(name): + def _f(cls): + REGISTERED_TEXT_PROCESSORS[name] = cls + return cls + + return _f + + +def get_txt_processor_cls(name): + return 
REGISTERED_TEXT_PROCESSORS.get(name, None) + + +class BaseTxtProcessor: + @staticmethod + def sp_phonemes(): + return ['|'] + + @classmethod + def process(cls, txt, preprocess_args): + raise NotImplementedError + + @classmethod + def postprocess(cls, txt_struct, preprocess_args): + # remove sil phoneme in head and tail + while len(txt_struct) > 0 and is_sil_phoneme(txt_struct[0][0]): + txt_struct = txt_struct[1:] + while len(txt_struct) > 0 and is_sil_phoneme(txt_struct[-1][0]): + txt_struct = txt_struct[:-1] + if preprocess_args['with_phsep']: + txt_struct = cls.add_bdr(txt_struct) + if preprocess_args['add_eos_bos']: + txt_struct = [["", [""]]] + txt_struct + [["", [""]]] + return txt_struct + + @classmethod + def add_bdr(cls, txt_struct): + txt_struct_ = [] + for i, ts in enumerate(txt_struct): + txt_struct_.append(ts) + if i != len(txt_struct) - 1 and \ + not is_sil_phoneme(txt_struct[i][0]) and not is_sil_phoneme(txt_struct[i + 1][0]): + txt_struct_.append(['|', ['|']]) + return txt_struct_ \ No newline at end of file diff --git a/NeuralSeq/data_gen/tts/txt_processors/en.py b/NeuralSeq/data_gen/tts/txt_processors/en.py new file mode 100644 index 0000000000000000000000000000000000000000..6f755d5ab1f2cf4407daee08cc3639a05e941a97 --- /dev/null +++ b/NeuralSeq/data_gen/tts/txt_processors/en.py @@ -0,0 +1,77 @@ +import re +import unicodedata + +from g2p_en import G2p +from g2p_en.expand import normalize_numbers +from nltk import pos_tag +from nltk.tokenize import TweetTokenizer + +from data_gen.tts.txt_processors.base_text_processor import BaseTxtProcessor, register_txt_processors +from data_gen.tts.data_gen_utils import is_sil_phoneme, PUNCS + +class EnG2p(G2p): + word_tokenize = TweetTokenizer().tokenize + + def __call__(self, text): + # preprocessing + words = EnG2p.word_tokenize(text) + tokens = pos_tag(words) # tuples of (word, tag) + + # steps + prons = [] + for word, pos in tokens: + if re.search("[a-z]", word) is None: + pron = [word] + + elif word in self.homograph2features: # Check homograph + pron1, pron2, pos1 = self.homograph2features[word] + if pos.startswith(pos1): + pron = pron1 + else: + pron = pron2 + elif word in self.cmu: # lookup CMU dict + pron = self.cmu[word][0] + else: # predict for oov + pron = self.predict(word) + + prons.extend(pron) + prons.extend([" "]) + + return prons[:-1] + + +@register_txt_processors('en') +class TxtProcessor(BaseTxtProcessor): + g2p = EnG2p() + + @staticmethod + def preprocess_text(text): + text = normalize_numbers(text) + text = ''.join(char for char in unicodedata.normalize('NFD', text) + if unicodedata.category(char) != 'Mn') # Strip accents + text = text.lower() + text = re.sub("[\'\"()]+", "", text) + text = re.sub("[-]+", " ", text) + text = re.sub(f"[^ a-z{PUNCS}]", "", text) + text = re.sub(f" ?([{PUNCS}]) ?", r"\1", text) # !! -> ! + text = re.sub(f"([{PUNCS}])+", r"\1", text) # !! -> ! 
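+        # Together with the replacements below, this normalisation maps e.g.
+        # "Hello--  WORLD!!" to "hello world !": case folded, quotes dropped,
+        # dashes turned into spaces, repeated punctuation collapsed and padded with spaces.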
+ text = text.replace("i.e.", "that is") + text = text.replace("i.e.", "that is") + text = text.replace("etc.", "etc") + text = re.sub(f"([{PUNCS}])", r" \1 ", text) + text = re.sub(rf"\s+", r" ", text) + return text + + @classmethod + def process(cls, txt, preprocess_args): + txt = cls.preprocess_text(txt).strip() + phs = cls.g2p(txt) + txt_struct = [[w, []] for w in txt.split(" ")] + i_word = 0 + for p in phs: + if p == ' ': + i_word += 1 + else: + txt_struct[i_word][1].append(p) + txt_struct = cls.postprocess(txt_struct, preprocess_args) + return txt_struct, txt \ No newline at end of file diff --git a/NeuralSeq/data_gen/tts/txt_processors/zh.py b/NeuralSeq/data_gen/tts/txt_processors/zh.py new file mode 100644 index 0000000000000000000000000000000000000000..ab155ed172dd1fd4e0d8802ce482a84bf897ce60 --- /dev/null +++ b/NeuralSeq/data_gen/tts/txt_processors/zh.py @@ -0,0 +1,43 @@ +import re +import jieba +from pypinyin import pinyin, Style +from data_gen.tts.data_gen_utils import PUNCS +from data_gen.tts.txt_processors.base_text_processor import BaseTxtProcessor +from utils.text_norm import NSWNormalizer + + +class TxtProcessor(BaseTxtProcessor): + table = {ord(f): ord(t) for f, t in zip( + u':,。!?【】()%#@&1234567890', + u':,.!?[]()%#@&1234567890')} + + @staticmethod + def preprocess_text(text): + text = text.translate(TxtProcessor.table) + text = NSWNormalizer(text).normalize(remove_punc=False) + text = re.sub("[\'\"()]+", "", text) + text = re.sub("[-]+", " ", text) + text = re.sub(f"[^ A-Za-z\u4e00-\u9fff{PUNCS}]", "", text) + text = re.sub(f"([{PUNCS}])+", r"\1", text) # !! -> ! + text = re.sub(f"([{PUNCS}])", r" \1 ", text) + text = re.sub(rf"\s+", r"", text) + text = re.sub(rf"[A-Za-z]+", r"$", text) + return text + + @classmethod + def process(cls, txt, pre_align_args): + txt = cls.preprocess_text(txt) + shengmu = pinyin(txt, style=Style.INITIALS) # https://blog.csdn.net/zhoulei124/article/details/89055403 + yunmu_finals = pinyin(txt, style=Style.FINALS) + yunmu_tone3 = pinyin(txt, style=Style.FINALS_TONE3) + yunmu = [[t[0] + '5'] if t[0] == f[0] else t for f, t in zip(yunmu_finals, yunmu_tone3)] \ + if pre_align_args['use_tone'] else yunmu_finals + + assert len(shengmu) == len(yunmu) + phs = ["|"] + for a, b, c in zip(shengmu, yunmu, yunmu_finals): + if a[0] == c[0]: + phs += [a[0], "|"] + else: + phs += [a[0], b[0], "|"] + return phs, txt diff --git a/NeuralSeq/data_gen/tts/txt_processors/zh_g2pM.py b/NeuralSeq/data_gen/tts/txt_processors/zh_g2pM.py new file mode 100644 index 0000000000000000000000000000000000000000..a3dbc7f2feb4c1dc3bc2d745a260cd0a7922e140 --- /dev/null +++ b/NeuralSeq/data_gen/tts/txt_processors/zh_g2pM.py @@ -0,0 +1,72 @@ +import re +import jieba +from pypinyin import pinyin, Style +from data_gen.tts.data_gen_utils import PUNCS +from data_gen.tts.txt_processors import zh +from g2pM import G2pM + +ALL_SHENMU = ['zh', 'ch', 'sh', 'b', 'p', 'm', 'f', 'd', 't', 'n', 'l', 'g', 'k', 'h', 'j', + 'q', 'x', 'r', 'z', 'c', 's', 'y', 'w'] +ALL_YUNMU = ['a', 'ai', 'an', 'ang', 'ao', 'e', 'ei', 'en', 'eng', 'er', 'i', 'ia', 'ian', + 'iang', 'iao', 'ie', 'in', 'ing', 'iong', 'iu', 'ng', 'o', 'ong', 'ou', + 'u', 'ua', 'uai', 'uan', 'uang', 'ui', 'un', 'uo', 'v', 'van', 've', 'vn'] + + +class TxtProcessor(zh.TxtProcessor): + model = G2pM() + + @staticmethod + def sp_phonemes(): + return ['|', '#'] + + @classmethod + def process(cls, txt, pre_align_args): + txt = cls.preprocess_text(txt) + ph_list = cls.model(txt, tone=pre_align_args['use_tone'], char_split=True) + seg_list = 
'#'.join(jieba.cut(txt)) + assert len(ph_list) == len([s for s in seg_list if s != '#']), (ph_list, seg_list) + + # 加入词边界'#' + ph_list_ = [] + seg_idx = 0 + for p in ph_list: + p = p.replace("u:", "v") + if seg_list[seg_idx] == '#': + ph_list_.append('#') + seg_idx += 1 + else: + ph_list_.append("|") + seg_idx += 1 + if re.findall('[\u4e00-\u9fff]', p): + if pre_align_args['use_tone']: + p = pinyin(p, style=Style.TONE3, strict=True)[0][0] + if p[-1] not in ['1', '2', '3', '4', '5']: + p = p + '5' + else: + p = pinyin(p, style=Style.NORMAL, strict=True)[0][0] + + finished = False + if len([c.isalpha() for c in p]) > 1: + for shenmu in ALL_SHENMU: + if p.startswith(shenmu) and not p.lstrip(shenmu).isnumeric(): + ph_list_ += [shenmu, p.lstrip(shenmu)] + finished = True + break + if not finished: + ph_list_.append(p) + + ph_list = ph_list_ + + # 去除静音符号周围的词边界标记 [..., '#', ',', '#', ...] + sil_phonemes = list(PUNCS) + TxtProcessor.sp_phonemes() + ph_list_ = [] + for i in range(0, len(ph_list), 1): + if ph_list[i] != '#' or (ph_list[i - 1] not in sil_phonemes and ph_list[i + 1] not in sil_phonemes): + ph_list_.append(ph_list[i]) + ph_list = ph_list_ + return ph_list, txt + + +if __name__ == '__main__': + phs, txt = TxtProcessor.process('他来到了,网易杭研大厦', {'use_tone': True}) + print(phs) diff --git a/NeuralSeq/data_gen/tts/wav_processors/__init__.py b/NeuralSeq/data_gen/tts/wav_processors/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4be97b377dcb95a0e6bceb876ac0ce93c8290249 --- /dev/null +++ b/NeuralSeq/data_gen/tts/wav_processors/__init__.py @@ -0,0 +1,2 @@ +from . import base_processor +from . import common_processors diff --git a/NeuralSeq/data_gen/tts/wav_processors/__pycache__/__init__.cpython-38.pyc b/NeuralSeq/data_gen/tts/wav_processors/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8df69223969f0fd45fce9c20f5d340e99583e928 Binary files /dev/null and b/NeuralSeq/data_gen/tts/wav_processors/__pycache__/__init__.cpython-38.pyc differ diff --git a/NeuralSeq/data_gen/tts/wav_processors/__pycache__/base_processor.cpython-38.pyc b/NeuralSeq/data_gen/tts/wav_processors/__pycache__/base_processor.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8530475f0849abfbdaec8f1a220f4f1e8e182094 Binary files /dev/null and b/NeuralSeq/data_gen/tts/wav_processors/__pycache__/base_processor.cpython-38.pyc differ diff --git a/NeuralSeq/data_gen/tts/wav_processors/__pycache__/common_processors.cpython-38.pyc b/NeuralSeq/data_gen/tts/wav_processors/__pycache__/common_processors.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b8adea3b8093308a2d0e0b9f47c2c0f516fc67d8 Binary files /dev/null and b/NeuralSeq/data_gen/tts/wav_processors/__pycache__/common_processors.cpython-38.pyc differ diff --git a/NeuralSeq/data_gen/tts/wav_processors/base_processor.py b/NeuralSeq/data_gen/tts/wav_processors/base_processor.py new file mode 100644 index 0000000000000000000000000000000000000000..e8200dc58a9388ac94a5ec34b8a65f75e380255b --- /dev/null +++ b/NeuralSeq/data_gen/tts/wav_processors/base_processor.py @@ -0,0 +1,25 @@ +REGISTERED_WAV_PROCESSORS = {} + + +def register_wav_processors(name): + def _f(cls): + REGISTERED_WAV_PROCESSORS[name] = cls + return cls + + return _f + + +def get_wav_processor_cls(name): + return REGISTERED_WAV_PROCESSORS.get(name, None) + + +class BaseWavProcessor: + @property + def name(self): + raise NotImplementedError + + def output_fn(self, 
input_fn): + return f'{input_fn[:-4]}_{self.name}.wav' + + def process(self, input_fn, sr, tmp_dir, processed_dir, item_name, preprocess_args): + raise NotImplementedError diff --git a/NeuralSeq/data_gen/tts/wav_processors/common_processors.py b/NeuralSeq/data_gen/tts/wav_processors/common_processors.py new file mode 100644 index 0000000000000000000000000000000000000000..8b0c62d5e1485ed9612b4452a656f0e837c2d693 --- /dev/null +++ b/NeuralSeq/data_gen/tts/wav_processors/common_processors.py @@ -0,0 +1,85 @@ +import os +import subprocess +import librosa +import numpy as np +from data_gen.tts.wav_processors.base_processor import BaseWavProcessor, register_wav_processors +from data_gen.tts.data_gen_utils import trim_long_silences +from utils.audio import save_wav, rnnoise +from utils.hparams import hparams + + +@register_wav_processors(name='sox_to_wav') +class ConvertToWavProcessor(BaseWavProcessor): + @property + def name(self): + return 'ToWav' + + def process(self, input_fn, sr, tmp_dir, processed_dir, item_name, preprocess_args): + if input_fn[-4:] == '.wav': + return input_fn, sr + else: + output_fn = self.output_fn(input_fn) + subprocess.check_call(f'sox -v 0.95 "{input_fn}" -t wav "{output_fn}"', shell=True) + return output_fn, sr + + +@register_wav_processors(name='sox_resample') +class ResampleProcessor(BaseWavProcessor): + @property + def name(self): + return 'Resample' + + def process(self, input_fn, sr, tmp_dir, processed_dir, item_name, preprocess_args): + output_fn = self.output_fn(input_fn) + sr_file = librosa.core.get_samplerate(input_fn) + if sr != sr_file: + subprocess.check_call(f'sox -v 0.95 "{input_fn}" -r{sr} "{output_fn}"', shell=True) + y, _ = librosa.core.load(input_fn, sr=sr) + y, _ = librosa.effects.trim(y) + save_wav(y, output_fn, sr) + return output_fn, sr + else: + return input_fn, sr + + +@register_wav_processors(name='trim_sil') +class TrimSILProcessor(BaseWavProcessor): + @property + def name(self): + return 'TrimSIL' + + def process(self, input_fn, sr, tmp_dir, processed_dir, item_name, preprocess_args): + output_fn = self.output_fn(input_fn) + y, _ = librosa.core.load(input_fn, sr=sr) + y, _ = librosa.effects.trim(y) + save_wav(y, output_fn, sr) + return output_fn + + +@register_wav_processors(name='trim_all_sil') +class TrimAllSILProcessor(BaseWavProcessor): + @property + def name(self): + return 'TrimSIL' + + def process(self, input_fn, sr, tmp_dir, processed_dir, item_name, preprocess_args): + output_fn = self.output_fn(input_fn) + y, audio_mask, _ = trim_long_silences( + input_fn, vad_max_silence_length=preprocess_args.get('vad_max_silence_length', 12)) + save_wav(y, output_fn, sr) + if preprocess_args['save_sil_mask']: + os.makedirs(f'{processed_dir}/sil_mask', exist_ok=True) + np.save(f'{processed_dir}/sil_mask/{item_name}.npy', audio_mask) + return output_fn, sr + + +@register_wav_processors(name='denoise') +class DenoiseProcessor(BaseWavProcessor): + @property + def name(self): + return 'Denoise' + + def process(self, input_fn, sr, tmp_dir, processed_dir, item_name, preprocess_args): + output_fn = self.output_fn(input_fn) + rnnoise(input_fn, output_fn, out_sample_rate=sr) + return output_fn, sr diff --git a/NeuralSeq/egs/datasets/audio/emotion/base_text2mel.yaml b/NeuralSeq/egs/datasets/audio/emotion/base_text2mel.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b10ae9c7e6ca6db81c75cd748280614911c640fa --- /dev/null +++ b/NeuralSeq/egs/datasets/audio/emotion/base_text2mel.yaml @@ -0,0 +1,17 @@ +raw_data_dir: 'data/raw/ESD' 
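+# ESD is the Emotional Speech Dataset; the ten English speakers (0011-0020)
+# enumerated in egs/datasets/audio/emotion/pre_align.py match num_spk: 10 below.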
+processed_data_dir: 'data/processed/emotion' +binary_data_dir: 'data/binary/emotion' +pre_align_cls: egs.datasets.audio.emotion.pre_align.EmoPreAlign +audio_sample_rate: 16000 +binarization_args: + shuffle: true +binarizer_cls: data_gen.tts.base_binarizer_emotion.EmotionBinarizer +use_spk_id: true +test_num: 200 +num_spk: 10 +pitch_type: frame +min_frames: 128 +num_test_samples: 30 +mel_loss: "ssim:0.5|l1:0.5" +vocoder_ckpt: '' +use_emotion: true \ No newline at end of file diff --git a/NeuralSeq/egs/datasets/audio/emotion/pre_align.py b/NeuralSeq/egs/datasets/audio/emotion/pre_align.py new file mode 100644 index 0000000000000000000000000000000000000000..3b625295a118845c01a3677004070714d11c162b --- /dev/null +++ b/NeuralSeq/egs/datasets/audio/emotion/pre_align.py @@ -0,0 +1,25 @@ +import os + +from data_gen.tts.base_preprocess import BasePreprocessor +import glob +import re + +class EmoPreAlign(BasePreprocessor): + + def meta_data(self): + spks = ['0012', '0011', '0013', '0014', '0015', '0016', '0017', '0018', '0019', '0020'] + pattern = re.compile('[\t\n ]+') + for spk in spks: + for line in open(f"{self.raw_data_dir}/{spk}/{spk}.txt", 'r'): # 打开文件 + line = re.sub(pattern, ' ', line) + if line == ' ': continue + split_ = line.split(' ') + txt = ' '.join(split_[1: -2]) + item_name = split_[0] + emotion = split_[-2] + wav_fn = f'{self.raw_data_dir}/{spk}/{emotion}/{item_name}.wav' + yield item_name, wav_fn, txt, spk, emotion + + +if __name__ == "__main__": + EmoPreAlign().process() diff --git a/NeuralSeq/egs/datasets/audio/libritts/__pycache__/pre_align.cpython-38.pyc b/NeuralSeq/egs/datasets/audio/libritts/__pycache__/pre_align.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eb11362f1f7dba53bda29be2d0905e167d066fdc Binary files /dev/null and b/NeuralSeq/egs/datasets/audio/libritts/__pycache__/pre_align.cpython-38.pyc differ diff --git a/NeuralSeq/egs/datasets/audio/libritts/base_text2mel.yaml b/NeuralSeq/egs/datasets/audio/libritts/base_text2mel.yaml new file mode 100644 index 0000000000000000000000000000000000000000..85c389a45a7bc2be4927867d07fa881754814a2c --- /dev/null +++ b/NeuralSeq/egs/datasets/audio/libritts/base_text2mel.yaml @@ -0,0 +1,14 @@ +raw_data_dir: 'data/raw/LibriTTS' +processed_data_dir: 'data/processed/libritts' +binary_data_dir: 'data/binary/libritts' +pre_align_cls: egs.datasets.audio.libritts.pre_align.LibrittsPreAlign +binarization_args: + shuffle: true +use_spk_id: true +test_num: 200 +num_spk: 2320 +pitch_type: frame +min_frames: 128 +num_test_samples: 30 +mel_loss: "ssim:0.5|l1:0.5" +vocoder_ckpt: '' \ No newline at end of file diff --git a/NeuralSeq/egs/datasets/audio/libritts/fs2.yaml b/NeuralSeq/egs/datasets/audio/libritts/fs2.yaml new file mode 100644 index 0000000000000000000000000000000000000000..95ae09730837aa10606f9771f41f40429b99d9ac --- /dev/null +++ b/NeuralSeq/egs/datasets/audio/libritts/fs2.yaml @@ -0,0 +1,3 @@ +base_config: + - egs/egs_bases/tts/fs2.yaml + - ./base_text2mel.yaml diff --git a/NeuralSeq/egs/datasets/audio/libritts/pre_align.py b/NeuralSeq/egs/datasets/audio/libritts/pre_align.py new file mode 100644 index 0000000000000000000000000000000000000000..8f04d01361430a4ad6b02421ac4e20d797f31dc8 --- /dev/null +++ b/NeuralSeq/egs/datasets/audio/libritts/pre_align.py @@ -0,0 +1,27 @@ +import os + +from data_gen.tts.base_preprocess import BasePreprocessor +import glob + + +class LibrittsPreAlign(BasePreprocessor): + def meta_data(self): + wav_fns = sorted(glob.glob(f'{self.raw_data_dir}/*/*/*.wav')) + for 
wav_fn in wav_fns: + item_name = os.path.basename(wav_fn)[:-4] + txt_fn = f'{wav_fn[:-4]}.normalized.txt' + with open(txt_fn, 'r') as f: + txt = f.readlines() + f.close() + spk = item_name.split("_")[0] + # Example: + # + # 'item_name': '103_1241_000000_000001' + # 'wav_fn': 'LibriTTS/train-clean-100/103/1241/103_1241_000000_000001.wav' + # 'txt': 'matthew Cuthbert is surprised' + # 'spk_name': '103' + yield {'item_name': item_name, 'wav_fn': wav_fn, 'txt': txt[0], 'spk_name': spk} + + +if __name__ == "__main__": + LibrittsPreAlign().process() diff --git a/NeuralSeq/egs/datasets/audio/libritts/pwg.yaml b/NeuralSeq/egs/datasets/audio/libritts/pwg.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a0fd70869274c3f8a7ac02a9ca5d7e1202dc895d --- /dev/null +++ b/NeuralSeq/egs/datasets/audio/libritts/pwg.yaml @@ -0,0 +1,8 @@ +base_config: egs/egs_bases/tts/vocoder/pwg.yaml +raw_data_dir: 'data/raw/LibriTTS' +processed_data_dir: 'data/processed/libritts' +binary_data_dir: 'data/binary/libritts_wav' +generator_params: + kernel_size: 5 +num_spk: 400 +max_samples: 20480 diff --git a/NeuralSeq/egs/datasets/audio/lj/base_mel2wav.yaml b/NeuralSeq/egs/datasets/audio/lj/base_mel2wav.yaml new file mode 100644 index 0000000000000000000000000000000000000000..df4355bc38d0568c0b3acfa4f7fc040cef5995d6 --- /dev/null +++ b/NeuralSeq/egs/datasets/audio/lj/base_mel2wav.yaml @@ -0,0 +1,5 @@ +raw_data_dir: 'data/raw/LJSpeech-1.1' +processed_data_dir: 'data/processed/ljspeech' +binary_data_dir: 'data/binary/ljspeech_wav' +binarization_args: + with_spk_embed: false \ No newline at end of file diff --git a/NeuralSeq/egs/datasets/audio/lj/preprocess.py b/NeuralSeq/egs/datasets/audio/lj/preprocess.py new file mode 100644 index 0000000000000000000000000000000000000000..a3d45c9aa855bb7ce40b5e8374547014350fa92b --- /dev/null +++ b/NeuralSeq/egs/datasets/audio/lj/preprocess.py @@ -0,0 +1,9 @@ +from data_gen.tts.base_preprocess import BasePreprocessor + + +class LJPreprocess(BasePreprocessor): + def meta_data(self): + for l in open(f'{self.raw_data_dir}/metadata.csv').readlines(): + item_name, _, txt = l.strip().split("|") + wav_fn = f"{self.raw_data_dir}/wavs/{item_name}.wav" + yield {'item_name': item_name, 'wav_fn': wav_fn, 'txt': txt} diff --git a/NeuralSeq/egs/datasets/audio/lj/pwg.yaml b/NeuralSeq/egs/datasets/audio/lj/pwg.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e0c6dc6da4367bad9bafd1a7ba492a5cfb18a347 --- /dev/null +++ b/NeuralSeq/egs/datasets/audio/lj/pwg.yaml @@ -0,0 +1,3 @@ +base_config: + - egs/egs_bases/tts/vocoder/pwg.yaml + - ./base_mel2wav.yaml \ No newline at end of file diff --git a/NeuralSeq/egs/datasets/audio/vctk/base_mel2wav.yaml b/NeuralSeq/egs/datasets/audio/vctk/base_mel2wav.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b5210a1361259d30449430f70849061d17a8e59f --- /dev/null +++ b/NeuralSeq/egs/datasets/audio/vctk/base_mel2wav.yaml @@ -0,0 +1,3 @@ +raw_data_dir: 'data/raw/VCTK-Corpus' +processed_data_dir: 'data/processed/vctk' +binary_data_dir: 'data/binary/vctk_wav' diff --git a/NeuralSeq/egs/datasets/audio/vctk/fs2.yaml b/NeuralSeq/egs/datasets/audio/vctk/fs2.yaml new file mode 100644 index 0000000000000000000000000000000000000000..49bb983bd98caca74af0ea70414e71aa34334b38 --- /dev/null +++ b/NeuralSeq/egs/datasets/audio/vctk/fs2.yaml @@ -0,0 +1,12 @@ +base_config: + - egs/egs_bases/tts/fs2.yaml +raw_data_dir: 'data/raw/VCTK-Corpus' +processed_data_dir: 'data/processed/vctk' +binary_data_dir: 'data/binary/vctk' 
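+# num_spk only needs to upper-bound the number of distinct speakers collected at
+# preprocessing time (see the assert in build_spk_map), so 400 leaves ample
+# headroom for VCTK's roughly 110 speakers.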
+pre_align_cls: egs.datasets.audio.vctk.pre_align.VCTKPreAlign +use_spk_id: true +test_num: 200 +num_spk: 400 +binarization_args: + shuffle: true + trim_eos_bos: true \ No newline at end of file diff --git a/NeuralSeq/egs/datasets/audio/vctk/pre_align.py b/NeuralSeq/egs/datasets/audio/vctk/pre_align.py new file mode 100644 index 0000000000000000000000000000000000000000..a03b3e12af245fa603403432f4487c53e8b13eab --- /dev/null +++ b/NeuralSeq/egs/datasets/audio/vctk/pre_align.py @@ -0,0 +1,22 @@ +import os + +from data_gen.tts.base_pre_align import BasePreAlign +import glob + + +class VCTKPreAlign(BasePreAlign): + def meta_data(self): + wav_fns = glob.glob(f'{self.raw_data_dir}/wav48/*/*.wav') + for wav_fn in wav_fns: + item_name = os.path.basename(wav_fn)[:-4] + spk = item_name.split("_")[0] + txt_fn = wav_fn.split("/") + txt_fn[-1] = f'{item_name}.txt' + txt_fn[-3] = f'txt' + txt_fn = "/".join(txt_fn) + if os.path.exists(txt_fn) and os.path.exists(wav_fn): + yield item_name, wav_fn, (self.load_txt, txt_fn), spk + + +if __name__ == "__main__": + VCTKPreAlign().process() diff --git a/NeuralSeq/egs/datasets/audio/vctk/pwg.yaml b/NeuralSeq/egs/datasets/audio/vctk/pwg.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8c7c557fee02b0a62881f36a2abe010ede0d5f39 --- /dev/null +++ b/NeuralSeq/egs/datasets/audio/vctk/pwg.yaml @@ -0,0 +1,6 @@ +base_config: + - egs/egs_bases/tts/vocoder/pwg.yaml + - ./base_mel2wav.yaml + +num_spk: 400 +max_samples: 20480 diff --git a/NeuralSeq/egs/egs_bases/config_base.yaml b/NeuralSeq/egs/egs_bases/config_base.yaml new file mode 100644 index 0000000000000000000000000000000000000000..39240ceb8cd52e4c0a146aa579f1ea046c727c84 --- /dev/null +++ b/NeuralSeq/egs/egs_bases/config_base.yaml @@ -0,0 +1,46 @@ +# task +binary_data_dir: '' +work_dir: '' # experiment directory. 
+infer: false # inference +amp: false +seed: 1234 +debug: false +save_codes: [] +# - configs +# - modules +# - tasks +# - utils +# - usr + +############# +# dataset +############# +ds_workers: 1 +test_num: 100 +endless_ds: false +sort_by_len: true + +######### +# train and eval +######### +print_nan_grads: false +load_ckpt: '' +save_best: true +num_ckpt_keep: 3 +clip_grad_norm: 0 +accumulate_grad_batches: 1 +tb_log_interval: 100 +num_sanity_val_steps: 5 # steps of validation at the beginning +check_val_every_n_epoch: 10 +val_check_interval: 2000 +valid_monitor_key: 'val_loss' +valid_monitor_mode: 'min' +max_epochs: 1000 +max_updates: 1000000 +max_tokens: 31250 +max_sentences: 100000 +max_valid_tokens: -1 +max_valid_sentences: -1 +test_input_dir: '' +resume_from_checkpoint: 0 +rename_tmux: true \ No newline at end of file diff --git a/NeuralSeq/egs/egs_bases/svs/base.yaml b/NeuralSeq/egs/egs_bases/svs/base.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ec306951586ae23506de9edfdc24b05279ff2f12 --- /dev/null +++ b/NeuralSeq/egs/egs_bases/svs/base.yaml @@ -0,0 +1,24 @@ +task_cls: tasks.svs.task.DiffFsTask +pitch_type: frame +timesteps: 100 +dilation_cycle_length: 1 +residual_layers: 20 +residual_channels: 256 +lr: 0.001 +decay_steps: 50000 +keep_bins: 80 +spec_min: [ ] +spec_max: [ ] + +content_cond_steps: [ ] # [ 0, 10000 ] +spk_cond_steps: [ ] # [ 0, 10000 ] +# train and eval +fs2_ckpt: '' +max_updates: 400000 +# max_updates: 200000 +use_gt_dur: true +use_gt_f0: true +gen_tgt_spk_id: -1 +max_sentences: 48 +num_sanity_val_steps: 1 +num_valid_plots: 1 diff --git a/NeuralSeq/egs/egs_bases/svs/lj_ds_beta6.yaml b/NeuralSeq/egs/egs_bases/svs/lj_ds_beta6.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7fd443c9843fb582283e75d44935e0152e2b4cb7 --- /dev/null +++ b/NeuralSeq/egs/egs_bases/svs/lj_ds_beta6.yaml @@ -0,0 +1,43 @@ +base_config: + - configs/tts/lj/fs2.yaml + - ./base.yaml +# spec_min and spec_max are calculated on the training set. 
+spec_min: [ -4.7574, -4.6783, -4.6431, -4.5832, -4.5390, -4.6771, -4.8089, -4.7672, + -4.5784, -4.7755, -4.7150, -4.8919, -4.8271, -4.7389, -4.6047, -4.7759, + -4.6799, -4.8201, -4.7823, -4.8262, -4.7857, -4.7545, -4.9358, -4.9733, + -5.1134, -5.1395, -4.9016, -4.8434, -5.0189, -4.8460, -5.0529, -4.9510, + -5.0217, -5.0049, -5.1831, -5.1445, -5.1015, -5.0281, -4.9887, -4.9916, + -4.9785, -4.9071, -4.9488, -5.0342, -4.9332, -5.0650, -4.8924, -5.0875, + -5.0483, -5.0848, -5.1809, -5.0677, -5.0015, -5.0792, -5.0636, -5.2413, + -5.1421, -5.1710, -5.3256, -5.0511, -5.1186, -5.0057, -5.0446, -5.1173, + -5.0325, -5.1085, -5.0053, -5.0755, -5.1176, -5.1004, -5.2153, -5.2757, + -5.3025, -5.2867, -5.2918, -5.3328, -5.2731, -5.2985, -5.2400, -5.2211 ] +spec_max: [ -0.5982, -0.0778, 0.1205, 0.2747, 0.4657, 0.5123, 0.5684, 0.7093, + 0.6461, 0.6420, 0.7316, 0.7715, 0.7681, 0.8349, 0.7815, 0.7591, + 0.7910, 0.7433, 0.7352, 0.6869, 0.6854, 0.6623, 0.5353, 0.6492, + 0.6909, 0.6106, 0.5761, 0.5936, 0.5638, 0.4054, 0.4545, 0.3589, + 0.3037, 0.3380, 0.1599, 0.2433, 0.2741, 0.2130, 0.1569, 0.1911, + 0.2324, 0.1586, 0.1221, 0.0341, -0.0558, 0.0553, -0.1153, -0.0933, + -0.1171, -0.0050, -0.1519, -0.1629, -0.0522, -0.0739, -0.2069, -0.2405, + -0.1244, -0.2116, -0.1361, -0.1575, -0.1442, 0.0513, -0.1567, -0.2000, + 0.0086, -0.0698, 0.1385, 0.0941, 0.1864, 0.1225, 0.2176, 0.2566, + 0.1670, 0.1007, 0.1444, 0.0888, 0.1998, 0.2414, 0.2932, 0.3047 ] + +task_cls: tasks.svs.diffspeech_task.DiffSpeechTask +vocoder: vocoders.hifigan.HifiGAN +vocoder_ckpt: checkpoints/0414_hifi_lj_1 +num_valid_plots: 10 +use_gt_dur: false +use_gt_f0: false +pitch_type: cwt +pitch_extractor: 'parselmouth' +max_updates: 160000 +lr: 0.001 +timesteps: 100 +K_step: 71 +diff_loss_type: l1 +diff_decoder_type: 'wavenet' +schedule_type: 'linear' +max_beta: 0.06 +fs2_ckpt: checkpoints/fs2_lj_1/model_ckpt_steps_150000.ckpt +save_gt: true \ No newline at end of file diff --git a/NeuralSeq/egs/egs_bases/svs/midi/cascade/opencs/aux_rel.yaml b/NeuralSeq/egs/egs_bases/svs/midi/cascade/opencs/aux_rel.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5c204cba8e70679c48301af9fd2302427a2be0dd --- /dev/null +++ b/NeuralSeq/egs/egs_bases/svs/midi/cascade/opencs/aux_rel.yaml @@ -0,0 +1,63 @@ +base_config: + - configs/singing/fs2.yaml + - egs/egs_bases/svs/midi/cascade/opencs/opencpop_statis.yaml + +audio_sample_rate: 24000 +hop_size: 128 # Hop size. +fft_size: 512 # FFT size. +win_size: 512 # FFT size. 
+fmin: 30 +fmax: 12000 +min_level_db: -120 + +binarization_args: + with_wav: true + with_spk_embed: false + with_align: true +raw_data_dir: 'data/raw/opencpop/segments' +processed_data_dir: 'xxx' +binarizer_cls: data_gen.singing.binarize.OpencpopBinarizer + + +binary_data_dir: 'data/binary/opencpop-midi-dp' +use_midi: true # for midi exp +use_gt_f0: false # for midi exp +use_gt_dur: false # for further midi exp +lambda_f0: 1.0 +lambda_uv: 1.0 +#lambda_energy: 0.1 +lambda_ph_dur: 1.0 +lambda_sent_dur: 1.0 +lambda_word_dur: 1.0 +predictor_grad: 0.1 +pe_enable: false +pe_ckpt: '' + +num_spk: 1 +test_prefixes: [ + '2044', + '2086', + '2092', + '2093', + '2100', +] + +task_cls: tasks.svs.diffsinger_task.AuxDecoderMIDITask +#vocoder: tasks.svs.singingvocoder.highgan.HighGAN +#vocoder_ckpt: checkpoints/h_2_model/checkpoint-530000steps.pkl +vocoder: vocoders.hifigan.HifiGAN +vocoder_ckpt: checkpoints/0109_hifigan_bigpopcs_hop128 + +use_nsf: true + +# config for experiments +max_frames: 5000 +max_tokens: 40000 +predictor_layers: 5 +rel_pos: true +dur_predictor_layers: 5 # * + +use_spk_embed: false +num_valid_plots: 10 +max_updates: 160000 +save_gt: true \ No newline at end of file diff --git a/NeuralSeq/egs/egs_bases/svs/midi/cascade/opencs/ds60_rel.yaml b/NeuralSeq/egs/egs_bases/svs/midi/cascade/opencs/ds60_rel.yaml new file mode 100644 index 0000000000000000000000000000000000000000..820a608456d199b77d021d5a771f0a076a5a4cfa --- /dev/null +++ b/NeuralSeq/egs/egs_bases/svs/midi/cascade/opencs/ds60_rel.yaml @@ -0,0 +1,34 @@ +base_config: + - egs/egs_bases/svs/popcs_ds_beta6.yaml + - egs/egs_bases/svs/midi/cascade/opencs/opencpop_statis.yaml + +binarizer_cls: data_gen.singing.binarize.OpencpopBinarizer +binary_data_dir: 'data/binary/opencpop-midi-dp' + +#switch_midi2f0_step: 174000 +use_midi: true # for midi exp +use_gt_f0: false # for midi exp +use_gt_dur: false # for further midi exp +lambda_f0: 1.0 +lambda_uv: 1.0 +#lambda_energy: 0.1 +lambda_ph_dur: 1.0 +lambda_sent_dur: 1.0 +lambda_word_dur: 1.0 +predictor_grad: 0.1 +pe_enable: false +pe_ckpt: '' + +fs2_ckpt: 'checkpoints/0302_opencpop_fs_midi/model_ckpt_steps_160000.ckpt' # +#num_valid_plots: 0 +task_cls: tasks.svs.diffsinger_task.DiffSingerMIDITask + +K_step: 60 +max_tokens: 36000 +predictor_layers: 5 +dilation_cycle_length: 4 # * +rel_pos: true +dur_predictor_layers: 5 # * +max_updates: 160000 +gaussian_start: false +mask_uv_prob: 0.15 \ No newline at end of file diff --git a/NeuralSeq/egs/egs_bases/svs/midi/cascade/opencs/opencpop_statis.yaml b/NeuralSeq/egs/egs_bases/svs/midi/cascade/opencs/opencpop_statis.yaml new file mode 100644 index 0000000000000000000000000000000000000000..bfe5cc939a46372bf4086d680d38c6737e1b7d81 --- /dev/null +++ b/NeuralSeq/egs/egs_bases/svs/midi/cascade/opencs/opencpop_statis.yaml @@ -0,0 +1,41 @@ +spec_min: [-6., -6., -6., -6., -6., -6., -6., -6., -6., -6., -6., -6., + -6., -6., -6., -6., -6., -6., -6., -6., -6., -6., -6., -6., + -6., -6., -6., -6., -6., -6., -6., -6., -6., -6., -6., -6., + -6., -6., -6., -6., -6., -6., -6., -6., -6., -6., -6., -6., + -6., -6., -6., -6., -6., -6., -6., -6., -6., -6., -6., -6., + -6., -6., -6., -6., -6., -6., -6., -6., -6., -6., -6., -6., + -6., -6., -6., -6., -6., -6., -6., -6.] 
+spec_max: [-7.9453e-01, -8.1116e-01, -6.1631e-01, -3.0679e-01, -1.3863e-01, + -5.0652e-02, -1.1563e-01, -1.0679e-01, -9.1068e-02, -6.2174e-02, + -7.5302e-02, -7.2217e-02, -6.3815e-02, -7.3299e-02, 7.3610e-03, + -7.2508e-02, -5.0234e-02, -1.6534e-01, -2.6928e-01, -2.0782e-01, + -2.0823e-01, -1.1702e-01, -7.0128e-02, -6.5868e-02, -1.2675e-02, + 1.5121e-03, -8.9902e-02, -2.1392e-01, -2.3789e-01, -2.8922e-01, + -3.0405e-01, -2.3029e-01, -2.2088e-01, -2.1542e-01, -2.9367e-01, + -3.0137e-01, -3.8281e-01, -4.3590e-01, -2.8681e-01, -4.6855e-01, + -5.7485e-01, -4.7022e-01, -5.4266e-01, -4.4848e-01, -6.4120e-01, + -6.8700e-01, -6.4860e-01, -7.6436e-01, -4.9971e-01, -7.1068e-01, + -6.9724e-01, -6.1487e-01, -5.5843e-01, -6.9773e-01, -5.7502e-01, + -7.0919e-01, -8.2431e-01, -8.4213e-01, -9.0431e-01, -8.2840e-01, + -7.7945e-01, -8.2758e-01, -8.7699e-01, -1.0532e+00, -1.0766e+00, + -1.1198e+00, -1.0185e+00, -9.8983e-01, -1.0001e+00, -1.0756e+00, + -1.0024e+00, -1.0304e+00, -1.0579e+00, -1.0188e+00, -1.0500e+00, + -1.0842e+00, -1.0923e+00, -1.1223e+00, -1.2381e+00, -1.6467e+00] + +mel_vmin: -6. #-6. +mel_vmax: 1.5 +wav2spec_eps: 1e-6 + +raw_data_dir: 'data/raw/opencpop/segments' +processed_data_dir: 'xxx' +binary_data_dir: 'data/binary/opencpop-midi-dp' +datasets: [ + 'opencpop', +] +test_prefixes: [ + '2044', + '2086', + '2092', + '2093', + '2100', +] diff --git a/NeuralSeq/egs/egs_bases/svs/midi/e2e/opencpop/ds1000-10dil.yaml b/NeuralSeq/egs/egs_bases/svs/midi/e2e/opencpop/ds1000-10dil.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8825430a386b8748847939a1366e54f344b50e92 --- /dev/null +++ b/NeuralSeq/egs/egs_bases/svs/midi/e2e/opencpop/ds1000-10dil.yaml @@ -0,0 +1,38 @@ +base_config: + - egs/egs_bases/svs/popcs_ds_beta6.yaml + - egs/egs_bases/svs/midi/cascade/opencs/opencpop_statis.yaml + +binarizer_cls: data_gen.singing.binarize.OpencpopBinarizer +binary_data_dir: 'data/binary/opencpop-midi-dp' + +#switch_midi2f0_step: 174000 +use_midi: true # for midi exp +use_gt_dur: false # for further midi exp +lambda_ph_dur: 1.0 +lambda_sent_dur: 1.0 +lambda_word_dur: 1.0 +predictor_grad: 0.1 +dur_predictor_layers: 5 # * + + +fs2_ckpt: '' # +#num_valid_plots: 0 +task_cls: tasks.svs.diffsinger_task.DiffSingerMIDITask + +timesteps: 1000 +K_step: 1000 +max_beta: 0.02 +max_tokens: 36000 +max_updates: 320000 +gaussian_start: True + +use_pitch_embed: false +use_gt_f0: false # for midi exp + +lambda_f0: 0. +lambda_uv: 0. 
+dilation_cycle_length: 10 # * +rel_pos: true +predictor_layers: 5 +pe_enable: true +pe_ckpt: 'checkpoints/0102_xiaoma_pe' diff --git a/NeuralSeq/egs/egs_bases/svs/midi/e2e/opencpop/ds1000.yaml b/NeuralSeq/egs/egs_bases/svs/midi/e2e/opencpop/ds1000.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3c942cda62b2da399b105b10f259dabe417c3ac7 --- /dev/null +++ b/NeuralSeq/egs/egs_bases/svs/midi/e2e/opencpop/ds1000.yaml @@ -0,0 +1,42 @@ +base_config: + - egs/egs_bases/svs/popcs_ds_beta6.yaml + - egs/egs_bases/svs/midi/cascade/opencs/opencpop_statis.yaml + +binarizer_cls: data_gen.singing.binarize.OpencpopBinarizer +binary_data_dir: 'data/binary/opencpop-midi-dp' + +#switch_midi2f0_step: 174000 +use_midi: true # for midi exp +use_gt_dur: false # for further midi exp +lambda_ph_dur: 1.0 +lambda_sent_dur: 1.0 +lambda_word_dur: 1.0 +predictor_grad: 0.1 +dur_predictor_layers: 5 # * + + +fs2_ckpt: '' # +#num_valid_plots: 0 +task_cls: tasks.svs.diffsinger_task.DiffSingerMIDITask + +# for diffusion schedule +timesteps: 1000 +K_step: 1000 +max_beta: 0.02 +max_tokens: 36000 +max_updates: 320000 +gaussian_start: True +pndm_speedup: 10 + +use_pitch_embed: false +use_gt_f0: false # for midi exp + +lambda_f0: 0. +lambda_uv: 0. +dilation_cycle_length: 4 # * +rel_pos: true +predictor_layers: 5 +pe_enable: true +pe_ckpt: 'checkpoints/0102_xiaoma_pe' + + diff --git a/NeuralSeq/egs/egs_bases/svs/midi/e2e/opencpop/ds100_adj_rel.yaml b/NeuralSeq/egs/egs_bases/svs/midi/e2e/opencpop/ds100_adj_rel.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3cdda09d091396b9513fd1f4d19099ece73d618f --- /dev/null +++ b/NeuralSeq/egs/egs_bases/svs/midi/e2e/opencpop/ds100_adj_rel.yaml @@ -0,0 +1,36 @@ +base_config: + - egs/egs_bases/svs/popcs_ds_beta6.yaml + - egs/egs_bases/svs/midi/cascade/opencs/opencpop_statis.yaml + +binarizer_cls: data_gen.singing.binarize.OpencpopBinarizer +binary_data_dir: 'data/binary/opencpop-midi-dp' + +#switch_midi2f0_step: 174000 +use_midi: true # for midi exp +use_gt_dur: false # for further midi exp +lambda_ph_dur: 1.0 +lambda_sent_dur: 1.0 +lambda_word_dur: 1.0 +predictor_grad: 0.1 +dur_predictor_layers: 5 # * + + +fs2_ckpt: '' # +#num_valid_plots: 0 +task_cls: tasks.svs.diffsinger_task.DiffSingerMIDITask + +K_step: 100 +max_tokens: 36000 +max_updates: 160000 +gaussian_start: True + +use_pitch_embed: false +use_gt_f0: false # for midi exp + +lambda_f0: 0. +lambda_uv: 0. 
+dilation_cycle_length: 4 # * +rel_pos: true +predictor_layers: 5 +pe_enable: true +pe_ckpt: 'checkpoints/0102_xiaoma_pe' diff --git a/NeuralSeq/egs/egs_bases/svs/midi/e2e/popcs/ds100_adj_rel.yaml b/NeuralSeq/egs/egs_bases/svs/midi/e2e/popcs/ds100_adj_rel.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b51b4f223e6833430457456c210807704aab03b2 --- /dev/null +++ b/NeuralSeq/egs/egs_bases/svs/midi/e2e/popcs/ds100_adj_rel.yaml @@ -0,0 +1,36 @@ +base_config: + - egs/egs_bases/svs/popcs_ds_beta6.yaml + - egs/egs_bases/svs/midi/cascade/popcs/popcs_statis.yaml + +binarizer_cls: data_gen.singing.binarize.MidiSingingBinarizer +binary_data_dir: 'data/binary/popcs-midi-dp' + +#switch_midi2f0_step: 174000 +use_midi: true # for midi exp +use_gt_dur: false # for further midi exp +lambda_ph_dur: 1.0 +lambda_sent_dur: 1.0 +lambda_word_dur: 1.0 +predictor_grad: 0.1 +dur_predictor_layers: 5 # * + + +fs2_ckpt: '' # +#num_valid_plots: 0 +task_cls: tasks.svs.diffsinger_task.DiffSingerMIDITask + +K_step: 100 +max_tokens: 40000 +max_updates: 160000 +gaussian_start: True + +use_pitch_embed: false +use_gt_f0: false # for midi exp + +lambda_f0: 0. +lambda_uv: 0. +dilation_cycle_length: 4 # * +rel_pos: true +predictor_layers: 5 +pe_enable: true +pe_ckpt: 'checkpoints/0102_xiaoma_pe' diff --git a/NeuralSeq/egs/egs_bases/svs/midi/pe.yaml b/NeuralSeq/egs/egs_bases/svs/midi/pe.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8321efe33939986744351b9144416a7c87e22965 --- /dev/null +++ b/NeuralSeq/egs/egs_bases/svs/midi/pe.yaml @@ -0,0 +1,24 @@ +base_config: + - configs/tts/lj/fs2.yaml + +max_frames: 8000 +audio_sample_rate: 24000 +hop_size: 128 # Hop size. +fft_size: 512 # FFT size. +win_size: 512 # FFT size. +fmin: 30 +fmax: 12000 +min_level_db: -120 + +binary_data_dir: 'xxx' + +pitch_type: frame +task_cls: tasks.tts.pe.PitchExtractionTask +pitch_extractor_conv_layers: 2 + + +# config for experiments +max_tokens: 20000 +use_spk_embed: false +num_valid_plots: 10 +max_updates: 60000 \ No newline at end of file diff --git a/NeuralSeq/egs/egs_bases/svs/popcs_ds_beta6.yaml b/NeuralSeq/egs/egs_bases/svs/popcs_ds_beta6.yaml new file mode 100644 index 0000000000000000000000000000000000000000..000c9415ebf61e922be184ae89ffec1eae65a639 --- /dev/null +++ b/NeuralSeq/egs/egs_bases/svs/popcs_ds_beta6.yaml @@ -0,0 +1,70 @@ +base_config: + - configs/tts/fs2.yaml + - configs/singing/base.yaml + - ./base.yaml + +audio_sample_rate: 24000 +hop_size: 128 # Hop size. +fft_size: 512 # FFT size. +win_size: 512 # FFT size. 
+fmin: 30 +fmax: 12000 +min_level_db: -120 + +binarization_args: + with_wav: true + with_spk_embed: false + with_align: true +raw_data_dir: 'data/raw/popcs' +processed_data_dir: 'data/processed/popcs' +binary_data_dir: 'data/binary/popcs-pmf0' +num_spk: 1 +datasets: [ + 'popcs', +] +test_prefixes: [ + 'popcs-说散就散', + 'popcs-隐形的翅膀', +] + +spec_min: [-6.8276, -7.0270, -6.8142, -7.1429, -7.6669, -7.6000, -7.1148, -6.9640, + -6.8414, -6.6596, -6.6880, -6.7439, -6.7986, -7.4940, -7.7845, -7.6586, + -6.9288, -6.7639, -6.9118, -6.8246, -6.7183, -7.1769, -6.9794, -7.4513, + -7.3422, -7.5623, -6.9610, -6.8158, -6.9595, -6.8403, -6.5688, -6.6356, + -7.0209, -6.5002, -6.7819, -6.5232, -6.6927, -6.5701, -6.5531, -6.7069, + -6.6462, -6.4523, -6.5954, -6.4264, -6.4487, -6.7070, -6.4025, -6.3042, + -6.4008, -6.3857, -6.3903, -6.3094, -6.2491, -6.3518, -6.3566, -6.4168, + -6.2481, -6.3624, -6.2858, -6.2575, -6.3638, -6.4520, -6.1835, -6.2754, + -6.1253, -6.1645, -6.0638, -6.1262, -6.0710, -6.1039, -6.4428, -6.1363, + -6.1054, -6.1252, -6.1797, -6.0235, -6.0758, -5.9453, -6.0213, -6.0446] +spec_max: [ 0.2645, 0.0583, -0.2344, -0.0184, 0.1227, 0.1533, 0.1103, 0.1212, + 0.2421, 0.1809, 0.2134, 0.3161, 0.3301, 0.3289, 0.2667, 0.2421, + 0.2581, 0.2600, 0.1394, 0.1907, 0.1082, 0.1474, 0.1680, 0.2550, + 0.1057, 0.0826, 0.0423, 0.1203, -0.0701, -0.0056, 0.0477, -0.0639, + -0.0272, -0.0728, -0.1648, -0.0855, -0.2652, -0.1998, -0.1547, -0.2167, + -0.4181, -0.5463, -0.4161, -0.4733, -0.6518, -0.5387, -0.4290, -0.4191, + -0.4151, -0.3042, -0.3810, -0.4160, -0.4496, -0.2847, -0.4676, -0.4658, + -0.4931, -0.4885, -0.5547, -0.5481, -0.6948, -0.7968, -0.8455, -0.8392, + -0.8770, -0.9520, -0.8749, -0.7297, -0.8374, -0.8667, -0.7157, -0.9035, + -0.9219, -0.8801, -0.9298, -0.9009, -0.9604, -1.0537, -1.0781, -1.3766] + +task_cls: tasks.svs.diffsinger_task.DiffSingerTask +#vocoder: tasks.svs.singingvocoder.highgan.HighGAN +#vocoder_ckpt: checkpoints/h_2_model/checkpoint-530000steps.pkl +vocoder: vocoders.hifigan.HifiGAN +vocoder_ckpt: checkpoints/0109_hifigan_bigpopcs_hop128 + +pitch_extractor: 'parselmouth' +# config for experiments +use_spk_embed: false +num_valid_plots: 10 +max_updates: 160000 +lr: 0.001 +timesteps: 100 +K_step: 51 +diff_loss_type: l1 +diff_decoder_type: 'wavenet' +schedule_type: 'linear' +max_beta: 0.06 +fs2_ckpt: '' +use_nsf: true \ No newline at end of file diff --git a/NeuralSeq/egs/egs_bases/svs/popcs_ds_beta6_offline.yaml b/NeuralSeq/egs/egs_bases/svs/popcs_ds_beta6_offline.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8628623e606b6ada6f7c5e8025ced36982fcb926 --- /dev/null +++ b/NeuralSeq/egs/egs_bases/svs/popcs_ds_beta6_offline.yaml @@ -0,0 +1,12 @@ +base_config: + - ./popcs_ds_beta6.yaml + +fs2_ckpt: checkpoints/popcs_fs2_pmf0_1230/model_ckpt_steps_160000.ckpt # to be infer +num_valid_plots: 0 +task_cls: tasks.svs.diffsinger_task.DiffSingerOfflineTask + +# tmp: +#pe_enable: true +#pe_ckpt: '' +vocoder: vocoders.hifigan.HifiGAN +vocoder_ckpt: checkpoints/0109_hifigan_bigpopcs_hop128 \ No newline at end of file diff --git a/NeuralSeq/egs/egs_bases/svs/popcs_fs2.yaml b/NeuralSeq/egs/egs_bases/svs/popcs_fs2.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a9f1a7a97f7eedd018c8d2ac93422c1ea899fb60 --- /dev/null +++ b/NeuralSeq/egs/egs_bases/svs/popcs_fs2.yaml @@ -0,0 +1,44 @@ +base_config: + - configs/singing/fs2.yaml + +audio_sample_rate: 24000 +hop_size: 128 # Hop size. +fft_size: 512 # FFT size. +win_size: 512 # FFT size. 
+fmin: 30 +fmax: 12000 +min_level_db: -120 + +binarization_args: + with_wav: true + with_spk_embed: false + with_align: true +raw_data_dir: 'data/raw/popcs' +processed_data_dir: 'data/processed/popcs' +binary_data_dir: 'data/binary/popcs-pmf0' +num_spk: 1 +datasets: [ + 'popcs', +] +test_prefixes: [ + 'popcs-说散就散', + 'popcs-隐形的翅膀', +] + +task_cls: tasks.tts.fs2.FastSpeech2Task +#vocoder: tasks.svs.singingvocoder.highgan.HighGAN +#vocoder_ckpt: checkpoints/h_2_model/checkpoint-530000steps.pkl +vocoder: vocoders.hifigan.HifiGAN +vocoder_ckpt: checkpoints/0109_hifigan_bigpopcs_hop128 +use_nsf: true + +# config for experiments +max_tokens: 18000 +use_spk_embed: false +num_valid_plots: 10 +max_updates: 160000 +save_gt: true + +# tmp: +#pe_enable: true +#pe_ckpt: '' \ No newline at end of file diff --git a/NeuralSeq/egs/egs_bases/tts/base.yaml b/NeuralSeq/egs/egs_bases/tts/base.yaml new file mode 100644 index 0000000000000000000000000000000000000000..255e4cce1bb52d58d24443bc277621d70732583f --- /dev/null +++ b/NeuralSeq/egs/egs_bases/tts/base.yaml @@ -0,0 +1,112 @@ +# task +base_config: ../config_base.yaml +task_cls: '' +############# +# dataset +############# +raw_data_dir: '' +processed_data_dir: '' +binary_data_dir: '' +dict_dir: '' +pre_align_cls: '' +binarizer_cls: data_gen.tts.base_binarizer.BaseBinarizer +pre_align_args: + txt_processor: en + use_tone: true # for ZH + sox_resample: false + sox_to_wav: false + allow_no_txt: false + trim_sil: false + denoise: false +binarization_args: + shuffle: false + with_txt: true + with_wav: false + with_align: true + with_spk_embed: false + with_spk_id: true + with_f0: true + with_f0cwt: false + with_linear: false + with_word: true + trim_sil: false + trim_eos_bos: false + reset_phone_dict: true + reset_word_dict: true +word_size: 30000 +pitch_extractor: parselmouth + +loud_norm: false +endless_ds: true + +test_num: 100 +min_frames: 0 +max_frames: 1548 +frames_multiple: 1 +max_input_tokens: 1550 +audio_num_mel_bins: 80 +audio_sample_rate: 22050 +hop_size: 256 # For 22050Hz, 275 ~= 12.5 ms (0.0125 * sample_rate) +win_size: 1024 # For 22050Hz, 1100 ~= 50 ms (If None, win_size: fft_size) (0.05 * sample_rate) +fmin: 80 # Set this to 55 if your speaker is male! if female, 95 should help taking off noise. (To test depending on dataset. Pitch info: male~[65, 260], female~[100, 525]) +fmax: 7600 # To be increased/reduced depending on data. 
+fft_size: 1024 # Extra window size is filled with 0 paddings to match this parameter +min_level_db: -100 +ref_level_db: 20 +griffin_lim_iters: 60 +num_spk: 1 +mel_vmin: -6 +mel_vmax: 1.5 +ds_workers: 1 + +######### +# model +######### +dropout: 0.1 +enc_layers: 4 +dec_layers: 4 +hidden_size: 256 +num_heads: 2 +enc_ffn_kernel_size: 9 +dec_ffn_kernel_size: 9 +ffn_act: gelu +ffn_padding: 'SAME' +use_spk_id: false +use_split_spk_id: false +use_spk_embed: false + + +########### +# optimization +########### +lr: 2.0 +scheduler: rsqrt # rsqrt|none +warmup_updates: 8000 +optimizer_adam_beta1: 0.9 +optimizer_adam_beta2: 0.98 +weight_decay: 0 +clip_grad_norm: 1 +clip_grad_value: 0 + + +########### +# train and eval +########### +max_tokens: 30000 +max_sentences: 100000 +max_valid_sentences: 1 +max_valid_tokens: 60000 +valid_infer_interval: 10000 +train_set_name: 'train' +train_sets: '' +valid_set_name: 'valid' +test_set_name: 'test' +num_test_samples: 0 +num_valid_plots: 10 +test_ids: [ ] +vocoder_denoise_c: 0.0 +profile_infer: false +out_wav_norm: false +save_gt: true +save_f0: false +gen_dir_name: '' \ No newline at end of file diff --git a/NeuralSeq/egs/egs_bases/tts/base_zh.yaml b/NeuralSeq/egs/egs_bases/tts/base_zh.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6e6400eebc67900e3bf7e71e4575762a6b5fdc3e --- /dev/null +++ b/NeuralSeq/egs/egs_bases/tts/base_zh.yaml @@ -0,0 +1,6 @@ +base_config: ./base.yaml +preprocess_args: + txt_processor: zh + use_tone: true + +word_size: 3000 \ No newline at end of file diff --git a/NeuralSeq/egs/egs_bases/tts/fs2.yaml b/NeuralSeq/egs/egs_bases/tts/fs2.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c200a50ad2e04ff685bd30c7d5c3be69ff7cdf3f --- /dev/null +++ b/NeuralSeq/egs/egs_bases/tts/fs2.yaml @@ -0,0 +1,102 @@ +base_config: ./base.yaml +task_cls: tasks.tts.fs2.FastSpeech2Task + +# model +hidden_size: 256 +dropout: 0.1 +encoder_type: fft # rel_fft|fft|tacotron|tacotron2|conformer +decoder_type: fft # fft|rnn|conv|conformer|wn + +# rnn enc/dec +encoder_K: 8 +decoder_rnn_dim: 0 # for rnn decoder, 0 -> hidden_size * 2 + +# fft enc/dec +use_pos_embed: true +dec_num_heads: 2 +dec_layers: 4 +ffn_hidden_size: 1024 +enc_ffn_kernel_size: 9 +dec_ffn_kernel_size: 9 + +# conv enc/dec +enc_dec_norm: ln +conv_use_pos: false +layers_in_block: 2 +enc_dilations: [ 1, 1, 1, 1 ] +enc_kernel_size: 5 +dec_dilations: [ 1, 1, 1, 1 ] # for conv decoder +dec_kernel_size: 5 +dur_loss: mse # huber|mol + +# duration +predictor_hidden: -1 +predictor_kernel: 5 +predictor_layers: 2 +dur_predictor_kernel: 3 +dur_predictor_layers: 2 +predictor_dropout: 0.5 + +# pitch and energy +pitch_norm: standard # standard|log +use_pitch_embed: true +pitch_type: frame # frame|ph|cwt +use_uv: true +cwt_hidden_size: 128 +cwt_layers: 2 +cwt_loss: l1 +cwt_add_f0_loss: false +cwt_std_scale: 0.8 + +pitch_ar: false +pitch_embed_type: 0 +pitch_loss: 'l1' # l1|l2|ssim +pitch_ssim_win: 11 +use_energy_embed: false + +# reference encoder and speaker embedding +use_ref_enc: false +use_var_enc: false +lambda_commit: 0.25 +var_enc_vq_codes: 64 +ref_norm_layer: bn +dec_inp_add_noise: false +sil_add_noise: false +ref_hidden_stride_kernel: + - 0,3,5 # conv_hidden_size, conv_stride, conv_kernel_size. conv_hidden_size=0: use hidden_size + - 0,3,5 + - 0,2,5 + - 0,2,5 + - 0,2,5 +pitch_enc_hidden_stride_kernel: + - 0,2,5 # conv_hidden_size, conv_stride, conv_kernel_size. 
conv_hidden_size=0: use hidden_size
+  - 0,2,5
+  - 0,2,5
+dur_enc_hidden_stride_kernel:
+  - 0,2,3 # conv_hidden_size, conv_stride, conv_kernel_size. conv_hidden_size=0: use hidden_size
+  - 0,2,3
+  - 0,1,3
+
+# mel
+mel_loss: l1:0.5|ssim:0.5 # l1|l2|gdl|ssim or l1:0.5|ssim:0.5
+
+# loss lambda
+lambda_f0: 1.0
+lambda_uv: 1.0
+lambda_energy: 0.1
+lambda_ph_dur: 0.1
+lambda_sent_dur: 1.0
+lambda_word_dur: 1.0
+predictor_grad: 0.1
+
+# train and eval
+pretrain_fs_ckpt: ''
+warmup_updates: 2000
+max_tokens: 32000
+max_sentences: 100000
+max_valid_sentences: 1
+max_updates: 120000
+use_gt_dur: false
+use_gt_f0: false
+ds_workers: 2
+lr: 1.0
diff --git a/NeuralSeq/egs/egs_bases/tts/fs2_adv.yaml b/NeuralSeq/egs/egs_bases/tts/fs2_adv.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c316b85e370301172286fe938427950269c08dd7
--- /dev/null
+++ b/NeuralSeq/egs/egs_bases/tts/fs2_adv.yaml
@@ -0,0 +1,47 @@
+base_config: ./fs2.yaml
+task_cls: tasks.tts.fs2_adv.FastSpeech2AdvTask
+
+disc_win_num: 3
+disc_interval: 1
+disc_reduction: stack # stack|sum|none
+disc_start_steps: 0
+rerun_gen: false
+
+disc_norm: in
+mel_disc_hidden_size: 128
+
+# mel decoder
+mel_gan: true
+lambda_mel_adv: 0.1
+mel_hidden_size: 256
+
+# others
+dropout: 0.05
+pitch_embed_type: 0
+enc_ffn_kernel_size: 9
+dec_ffn_kernel_size: 9
+use_cond_disc: false
+
+optimizer_adam_beta1: 0.5
+optimizer_adam_beta2: 0.999
+generator_grad_norm: 5.0 # Generator's gradient norm.
+disc_hidden_size: 128
+disc_lr: 0.0001 # Discriminator's learning rate.
+discriminator_optimizer_params:
+  eps: 1.0e-6 # Discriminator's epsilon.
+  weight_decay: 0.0 # Discriminator's weight decay coefficient.
+discriminator_scheduler_params:
+  step_size: 60000 # Discriminator's scheduler step size.
+  gamma: 0.5 # Discriminator's scheduler gamma.
+  # At each step size, lr will be multiplied by this parameter.
+discriminator_grad_norm: 1 # Discriminator's gradient norm.
+ +max_updates: 400000 +max_tokens: 30000 +max_sentences: 80 +val_check_interval: 2000 + +gen_dir_name: '' +num_ckpt_keep: 2 +save_best: false + diff --git a/NeuralSeq/egs/egs_bases/tts/ps.yaml b/NeuralSeq/egs/egs_bases/tts/ps.yaml new file mode 100644 index 0000000000000000000000000000000000000000..23e93bf94679b0a436f2d98d27d0e25893f32300 --- /dev/null +++ b/NeuralSeq/egs/egs_bases/tts/ps.yaml @@ -0,0 +1,63 @@ +base_config: ./fs2.yaml + +########################### +# models +########################### +# encoders +hidden_size: 192 +ffn_hidden_size: 768 +enc_ffn_kernel_size: 5 +enc_layers: 4 +dur_level: word +encoder_type: rel_fft +use_word_encoder: true + +# mix ling encoder +word_enc_layers: 4 +word_encoder_type: rel_fft +use_pitch_embed: false +enc_prenet: true +enc_pre_ln: true +text_encoder_postnet: true +dropout: 0.0 +add_word_pos: true + +# dur predictor +dur_predictor_layers: 3 +dur_predictor_kernel: 5 +predictor_dropout: 0.2 + +## fvae +use_fvae: true +latent_size: 16 +fvae_encoder_type: conv +fvae_decoder_type: conv +fvae_enc_dec_hidden: 192 +fvae_kernel_size: 5 +fvae_enc_n_layers: 8 +fvae_dec_n_layers: 4 +fvae_strides: 4 +fvae_noise_scale: 1.0 + +# prior flow +use_prior_flow: true +prior_flow_hidden: 64 +prior_flow_kernel_size: 3 +prior_flow_n_blocks: 4 + +########################### +# training and inference +########################### +lambda_kl: 1.0 +kl_min: 0.0 +lambda_sent_dur: 0.0 +kl_start_steps: 10000 +posterior_start_steps: 0 +frames_multiple: 4 +num_valid_plots: 10 +lr: 0.0002 +warmup_updates: 8000 +max_tokens: 40000 +valid_infer_interval: 10000 +max_sentences: 80 +max_updates: 480000 \ No newline at end of file diff --git a/NeuralSeq/egs/egs_bases/tts/ps_flow.yaml b/NeuralSeq/egs/egs_bases/tts/ps_flow.yaml new file mode 100644 index 0000000000000000000000000000000000000000..36747ea5dcc065bdda68e79976d3f1827b5853f4 --- /dev/null +++ b/NeuralSeq/egs/egs_bases/tts/ps_flow.yaml @@ -0,0 +1,20 @@ +base_config: ./ps2.yaml +task_cls: tasks.tts.ps_flow.PortaSpeechFlowTask + +use_post_flow: true +detach_postflow_input: true +post_flow_lr: 0.001 +post_glow_hidden: 192 +post_glow_kernel_size: 3 +post_glow_n_blocks: 12 +post_glow_n_block_layers: 3 +post_share_cond_layers: false +share_wn_layers: 4 +use_cond_proj: false +use_latent_cond: false +use_txt_cond: true +sigmoid_scale: false +post_glow_training_start: 160000 +noise_scale: 0.8 +infer_post_glow: true +two_stage: true \ No newline at end of file diff --git a/NeuralSeq/egs/egs_bases/tts/ps_flow_small.yaml b/NeuralSeq/egs/egs_bases/tts/ps_flow_small.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e95a86492ee9c6516a05201bf4d0a2de6e55e947 --- /dev/null +++ b/NeuralSeq/egs/egs_bases/tts/ps_flow_small.yaml @@ -0,0 +1,42 @@ +base_config: ./ps_flow.yaml + +########################### +# models +########################### +# encoders +hidden_size: 128 +ffn_hidden_size: 512 +enc_ffn_kernel_size: 3 +enc_layers: 3 +word_enc_layers: 3 + +# dur predictor +dur_predictor_layers: 3 +dur_predictor_kernel: 5 +predictor_dropout: 0.2 + +## fvae +use_fvae: true +latent_size: 16 +fvae_encoder_type: wn +fvae_decoder_type: wn +fvae_enc_dec_hidden: 128 +fvae_kernel_size: 3 +fvae_enc_n_layers: 8 +fvae_dec_n_layers: 3 +fvae_strides: 4 +fvae_noise_scale: 1.0 + + +# prior flow +use_prior_flow: true +prior_flow_hidden: 32 +prior_flow_kernel_size: 3 +prior_flow_n_blocks: 3 +# post flow +post_glow_hidden: 128 +post_glow_kernel_size: 3 +post_glow_n_blocks: 8 +post_glow_n_block_layers: 3 +share_wn_layers: 4 +noise_scale: 0.6 \ No 
newline at end of file diff --git a/NeuralSeq/egs/egs_bases/tts/vocoder/base.yaml b/NeuralSeq/egs/egs_bases/tts/vocoder/base.yaml new file mode 100644 index 0000000000000000000000000000000000000000..92a1a74e269f5245af09a1bc00a5998f069b6801 --- /dev/null +++ b/NeuralSeq/egs/egs_bases/tts/vocoder/base.yaml @@ -0,0 +1,34 @@ +base_config: ../base.yaml +binarization_args: + with_wav: true + with_spk_embed: false + with_align: false + with_word: false + with_txt: false + +########### +# train and eval +########### +max_samples: 25600 +max_sentences: 5 +max_valid_sentences: 1 +max_updates: 1000000 +val_check_interval: 2000 + +########################################################### +# FEATURE EXTRACTION SETTING # +########################################################### +fft_size: 1024 # FFT size. +hop_size: 256 # Hop size. +win_length: null # Window length. +# If set to null, it will be the same as fft_size. +window: "hann" # Window function. +num_mels: 80 # Number of mel basis. +fmin: 80 # Minimum freq in mel basis calculation. +fmax: 7600 # Maximum frequency in mel basis calculation. +aux_context_window: 0 # Context window size for auxiliary feature. +use_pitch_embed: false + +generator_grad_norm: 10 # Generator's gradient norm. +discriminator_grad_norm: 1 # Discriminator's gradient norm. +disc_start_steps: 40000 # Number of steps to start to train discriminator. diff --git a/NeuralSeq/egs/egs_bases/tts/vocoder/hifigan.yaml b/NeuralSeq/egs/egs_bases/tts/vocoder/hifigan.yaml new file mode 100644 index 0000000000000000000000000000000000000000..fb77ba5104bf9d5b7ce9282f626bf56e957fbfe8 --- /dev/null +++ b/NeuralSeq/egs/egs_bases/tts/vocoder/hifigan.yaml @@ -0,0 +1,28 @@ +base_config: ./base.yaml +task_cls: tasks.vocoder.hifigan.HifiGanTask +resblock: "1" +adam_b1: 0.8 +adam_b2: 0.99 +upsample_rates: [ 8,8,2,2 ] +upsample_kernel_sizes: [ 16,16,4,4 ] +upsample_initial_channel: 512 +resblock_kernel_sizes: [ 3,7,11 ] +resblock_dilation_sizes: [ [ 1,3,5 ], [ 1,3,5 ], [ 1,3,5 ] ] + +use_pitch_embed: false +use_fm_loss: false +use_ms_stft: false + +lambda_mel: 5.0 +lambda_mel_adv: 1.0 +lambda_cdisc: 4.0 +lambda_adv: 1.0 + +lr: 0.0002 # Generator's learning rate. +generator_scheduler_params: + step_size: 600 + gamma: 0.999 +discriminator_scheduler_params: + step_size: 600 + gamma: 0.999 +max_updates: 3000000 \ No newline at end of file diff --git a/NeuralSeq/egs/egs_bases/tts/vocoder/pwg.yaml b/NeuralSeq/egs/egs_bases/tts/vocoder/pwg.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2d95bbd92abcdf70fb7a38b15877509390696fd2 --- /dev/null +++ b/NeuralSeq/egs/egs_bases/tts/vocoder/pwg.yaml @@ -0,0 +1,82 @@ +base_config: ./base.yaml +task_cls: tasks.vocoder.pwg.PwgTask + +aux_context_window: 2 # Context window size for auxiliary feature. +use_pitch_embed: false +########################################################### +# GENERATOR NETWORK ARCHITECTURE SETTING # +########################################################### +generator_params: + in_channels: 1 # Number of input channels. + out_channels: 1 # Number of output channels. + kernel_size: 3 # Kernel size of dilated convolution. + layers: 30 # Number of residual block layers. + stacks: 3 # Number of stacks i.e., dilation cycles. + residual_channels: 64 # Number of channels in residual conv. + gate_channels: 128 # Number of channels in gated conv. + skip_channels: 64 # Number of channels in skip conv. + aux_channels: 80 # Number of channels for auxiliary feature conv. + # Must be the same as num_mels. 
+  # If set to 2, previous 2 and future 2 frames will be considered.
+  dropout: 0.0 # Dropout rate. 0.0 means no dropout applied.
+  use_weight_norm: true # Whether to use weight norm.
+  # If set to true, it will be applied to all of the conv layers.
+  upsample_net: "ConvInUpsampleNetwork" # Upsampling network architecture.
+  upsample_params: # Upsampling network parameters.
+    upsample_scales: [4, 4, 4, 4] # Upsampling scales. Product of these must be the same as hop size.
+  use_pitch_embed: false
+  use_nsf: false
+###########################################################
+# DISCRIMINATOR NETWORK ARCHITECTURE SETTING #
+###########################################################
+discriminator_params:
+  in_channels: 1 # Number of input channels.
+  out_channels: 1 # Number of output channels.
+  kernel_size: 3 # Kernel size of conv layers.
+  layers: 10 # Number of conv layers.
+  conv_channels: 64 # Number of channels in conv layers.
+  bias: true # Whether to use bias parameter in conv.
+  use_weight_norm: true # Whether to use weight norm.
+  # If set to true, it will be applied to all of the conv layers.
+  nonlinear_activation: "LeakyReLU" # Nonlinear function after each conv.
+  nonlinear_activation_params: # Nonlinear function parameters
+    negative_slope: 0.2 # Alpha in LeakyReLU.
+rerun_gen: true
+
+###########################################################
+# STFT LOSS SETTING #
+###########################################################
+stft_loss_params:
+  fft_sizes: [1024, 2048, 512] # List of FFT size for STFT-based loss.
+  hop_sizes: [120, 240, 50] # List of hop size for STFT-based loss
+  win_lengths: [600, 1200, 240] # List of window length for STFT-based loss.
+  window: "hann_window" # Window function for STFT-based loss
+use_mel_loss: false
+
+###########################################################
+# ADVERSARIAL LOSS SETTING #
+###########################################################
+lambda_adv: 4.0 # Loss balancing coefficient.
+
+###########################################################
+# OPTIMIZER & SCHEDULER SETTING #
+###########################################################
+generator_optimizer_params:
+  lr: 0.0001 # Generator's learning rate.
+  eps: 1.0e-6 # Generator's epsilon.
+  weight_decay: 0.0 # Generator's weight decay coefficient.
+generator_scheduler_params:
+  step_size: 200000 # Generator's scheduler step size.
+  gamma: 0.5 # Generator's scheduler gamma.
+  # At each step size, lr will be multiplied by this parameter.
+generator_grad_norm: 10 # Generator's gradient norm.
+discriminator_optimizer_params:
+  lr: 0.00005 # Discriminator's learning rate.
+  eps: 1.0e-6 # Discriminator's epsilon.
+  weight_decay: 0.0 # Discriminator's weight decay coefficient.
+discriminator_scheduler_params:
+  step_size: 200000 # Discriminator's scheduler step size.
+  gamma: 0.5 # Discriminator's scheduler gamma.
+  # At each step size, lr will be multiplied by this parameter.
+discriminator_grad_norm: 1 # Discriminator's gradient norm.
+disc_start_steps: 40000 # Number of steps to start to train discriminator.
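The stft_loss_params block above (fft_sizes, hop_sizes, win_lengths, window) drives the multi-resolution STFT auxiliary loss used alongside the adversarial term when training the Parallel WaveGAN vocoder. As a rough illustration of how those three resolutions are typically combined (a minimal sketch assuming PyTorch and the usual spectral-convergence plus log-magnitude formulation; the function names below are illustrative and not this repository's API):

import torch
import torch.nn.functional as F

def stft_magnitude(x, fft_size, hop_size, win_length):
    # Magnitude spectrogram at one resolution; the clamp avoids log(0) later.
    window = torch.hann_window(win_length, device=x.device)
    spec = torch.stft(x, fft_size, hop_size, win_length, window, return_complex=True)
    return spec.abs().clamp(min=1e-7)

def multi_resolution_stft_loss(y_hat, y,
                               fft_sizes=(1024, 2048, 512),
                               hop_sizes=(120, 240, 50),
                               win_lengths=(600, 1200, 240)):
    # Average of spectral-convergence + log-magnitude L1 terms over all resolutions.
    total = 0.0
    for fft_size, hop_size, win_length in zip(fft_sizes, hop_sizes, win_lengths):
        mag_hat = stft_magnitude(y_hat, fft_size, hop_size, win_length)
        mag = stft_magnitude(y, fft_size, hop_size, win_length)
        sc_loss = torch.norm(mag - mag_hat, p='fro') / torch.norm(mag, p='fro')
        mag_loss = F.l1_loss(torch.log(mag_hat), torch.log(mag))
        total = total + sc_loss + mag_loss
    return total / len(fft_sizes)

In this config the auxiliary loss is balanced against the adversarial term via lambda_adv, and the discriminator only starts training after disc_start_steps updates.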
diff --git a/NeuralSeq/gitattributes b/NeuralSeq/gitattributes new file mode 100644 index 0000000000000000000000000000000000000000..7fd1b7b544c9ec416883d1cae83fc8b0c4ea20b0 --- /dev/null +++ b/NeuralSeq/gitattributes @@ -0,0 +1,33 @@ +*.7z filter=lfs diff=lfs merge=lfs -text +*.arrow filter=lfs diff=lfs merge=lfs -text +*.bin filter=lfs diff=lfs merge=lfs -text +*.bz2 filter=lfs diff=lfs merge=lfs -text +*.ftz filter=lfs diff=lfs merge=lfs -text +*.gz filter=lfs diff=lfs merge=lfs -text +*.h5 filter=lfs diff=lfs merge=lfs -text +*.joblib filter=lfs diff=lfs merge=lfs -text +*.lfs.* filter=lfs diff=lfs merge=lfs -text +*.model filter=lfs diff=lfs merge=lfs -text +*.msgpack filter=lfs diff=lfs merge=lfs -text +*.npy filter=lfs diff=lfs merge=lfs -text +*.npz filter=lfs diff=lfs merge=lfs -text +*.onnx filter=lfs diff=lfs merge=lfs -text +*.ot filter=lfs diff=lfs merge=lfs -text +*.parquet filter=lfs diff=lfs merge=lfs -text +*.pickle filter=lfs diff=lfs merge=lfs -text +*.pkl filter=lfs diff=lfs merge=lfs -text +*.pb filter=lfs diff=lfs merge=lfs -text +*.pt filter=lfs diff=lfs merge=lfs -text +*.pth filter=lfs diff=lfs merge=lfs -text +*.rar filter=lfs diff=lfs merge=lfs -text +saved_model/**/* filter=lfs diff=lfs merge=lfs -text +*.tar.* filter=lfs diff=lfs merge=lfs -text +*.tflite filter=lfs diff=lfs merge=lfs -text +*.tgz filter=lfs diff=lfs merge=lfs -text +*.wasm filter=lfs diff=lfs merge=lfs -text +*.xz filter=lfs diff=lfs merge=lfs -text +*.zip filter=lfs diff=lfs merge=lfs -text +*.zstandard filter=lfs diff=lfs merge=lfs -text +*tfevents* filter=lfs diff=lfs merge=lfs -text +model_ckpt_steps* filter=lfs diff=lfs merge=lfs -text +checkpoints/0831_opencpop_ds1000 filter=lfs diff=lfs merge=lfs -text diff --git a/NeuralSeq/inference/svs/__pycache__/base_svs_infer.cpython-37.pyc b/NeuralSeq/inference/svs/__pycache__/base_svs_infer.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..50f9173b835d9e7192c5ccba786a45fed931e560 Binary files /dev/null and b/NeuralSeq/inference/svs/__pycache__/base_svs_infer.cpython-37.pyc differ diff --git a/NeuralSeq/inference/svs/__pycache__/base_svs_infer.cpython-38.pyc b/NeuralSeq/inference/svs/__pycache__/base_svs_infer.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..67023323a112b44a98571a1329f2f392db4012e2 Binary files /dev/null and b/NeuralSeq/inference/svs/__pycache__/base_svs_infer.cpython-38.pyc differ diff --git a/NeuralSeq/inference/svs/__pycache__/ds_e2e.cpython-37.pyc b/NeuralSeq/inference/svs/__pycache__/ds_e2e.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fd35cc505b7758778c80d8deb85424e72de9c9b8 Binary files /dev/null and b/NeuralSeq/inference/svs/__pycache__/ds_e2e.cpython-37.pyc differ diff --git a/NeuralSeq/inference/svs/__pycache__/ds_e2e.cpython-38.pyc b/NeuralSeq/inference/svs/__pycache__/ds_e2e.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..31321763909441905bfc8d2e595ca28072a5249d Binary files /dev/null and b/NeuralSeq/inference/svs/__pycache__/ds_e2e.cpython-38.pyc differ diff --git a/NeuralSeq/inference/svs/base_svs_infer.py b/NeuralSeq/inference/svs/base_svs_infer.py new file mode 100644 index 0000000000000000000000000000000000000000..39ed74f29f7526d5149e4f0079a3681a3bac2582 --- /dev/null +++ b/NeuralSeq/inference/svs/base_svs_infer.py @@ -0,0 +1,265 @@ +import os + +import torch +import numpy as np +from modules.hifigan.hifigan import HifiGanGenerator +from vocoders.hifigan import HifiGAN 
+from inference.svs.opencpop.map import cpop_pinyin2ph_func + +from utils import load_ckpt +from utils.hparams import set_hparams, hparams +from utils.text_encoder import TokenTextEncoder +from pypinyin import pinyin, lazy_pinyin, Style +import librosa +import glob +import re + + +class BaseSVSInfer: + def __init__(self, hparams, device=None): + if device is None: + device = 'cuda' if torch.cuda.is_available() else 'cpu' + self.hparams = hparams + self.device = device + + phone_list = ["AP", "SP", "a", "ai", "an", "ang", "ao", "b", "c", "ch", "d", "e", "ei", "en", "eng", "er", "f", "g", + "h", "i", "ia", "ian", "iang", "iao", "ie", "in", "ing", "iong", "iu", "j", "k", "l", "m", "n", "o", + "ong", "ou", "p", "q", "r", "s", "sh", "t", "u", "ua", "uai", "uan", "uang", "ui", "un", "uo", "v", + "van", "ve", "vn", "w", "x", "y", "z", "zh"] + self.ph_encoder = TokenTextEncoder(None, vocab_list=phone_list, replace_oov=',') + self.pinyin2phs = cpop_pinyin2ph_func() + self.spk_map = {'opencpop': 0} + + self.model = self.build_model() + self.model.eval() + self.model.to(self.device) + self.vocoder = self.build_vocoder() + self.vocoder.eval() + self.vocoder.to(self.device) + + def build_model(self): + raise NotImplementedError + + def forward_model(self, inp): + raise NotImplementedError + + def build_vocoder(self): + base_dir = hparams['vocoder_ckpt'] + config_path = f'{base_dir}/config.yaml' + ckpt = sorted(glob.glob(f'{base_dir}/model_ckpt_steps_*.ckpt'), key= + lambda x: int(re.findall(f'{base_dir}/model_ckpt_steps_(\d+).ckpt', x)[0]))[-1] + print('| load HifiGAN: ', ckpt) + ckpt_dict = torch.load(ckpt, map_location="cpu") + config = set_hparams(config_path, global_hparams=False) + state = ckpt_dict["state_dict"]["model_gen"] + vocoder = HifiGanGenerator(config) + vocoder.load_state_dict(state, strict=True) + vocoder.remove_weight_norm() + vocoder = vocoder.eval().to(self.device) + return vocoder + + def run_vocoder(self, c, **kwargs): + c = c.transpose(2, 1) # [B, 80, T] + f0 = kwargs.get('f0') # [B, T] + if f0 is not None and hparams.get('use_nsf'): + # f0 = torch.FloatTensor(f0).to(self.device) + y = self.vocoder(c, f0).view(-1) + else: + y = self.vocoder(c).view(-1) + # [T] + return y[None] + + def preprocess_word_level_input(self, inp): + # Pypinyin can't solve polyphonic words + text_raw = inp['text'].replace('最长', '最常').replace('长睫毛', '常睫毛') \ + .replace('那么长', '那么常').replace('多长', '多常') \ + .replace('很长', '很常') # We hope someone could provide a better g2p module for us by opening pull requests. + + # lyric + pinyins = lazy_pinyin(text_raw, strict=False) + ph_per_word_lst = [self.pinyin2phs[pinyin.strip()] for pinyin in pinyins if pinyin.strip() in self.pinyin2phs] + + # Note + note_per_word_lst = [x.strip() for x in inp['notes'].split('|') if x.strip() != ''] + mididur_per_word_lst = [x.strip() for x in inp['notes_duration'].split('|') if x.strip() != ''] + + if len(note_per_word_lst) == len(ph_per_word_lst) == len(mididur_per_word_lst): + print('Pass word-notes check.') + else: + print('The number of words does\'t match the number of notes\' windows. 
', + 'You should split the note(s) for each word by | mark.') + print(ph_per_word_lst, note_per_word_lst, mididur_per_word_lst) + print(len(ph_per_word_lst), len(note_per_word_lst), len(mididur_per_word_lst)) + return None + + note_lst = [] + ph_lst = [] + midi_dur_lst = [] + is_slur = [] + for idx, ph_per_word in enumerate(ph_per_word_lst): + # for phs in one word: + # single ph like ['ai'] or multiple phs like ['n', 'i'] + ph_in_this_word = ph_per_word.split() + + # for notes in one word: + # single note like ['D4'] or multiple notes like ['D4', 'E4'] which means a 'slur' here. + note_in_this_word = note_per_word_lst[idx].split() + midi_dur_in_this_word = mididur_per_word_lst[idx].split() + # process for the model input + # Step 1. + # Deal with note of 'not slur' case or the first note of 'slur' case + # j ie + # F#4/Gb4 F#4/Gb4 + # 0 0 + for ph in ph_in_this_word: + ph_lst.append(ph) + note_lst.append(note_in_this_word[0]) + midi_dur_lst.append(midi_dur_in_this_word[0]) + is_slur.append(0) + # step 2. + # Deal with the 2nd, 3rd... notes of 'slur' case + # j ie ie + # F#4/Gb4 F#4/Gb4 C#4/Db4 + # 0 0 1 + if len(note_in_this_word) > 1: # is_slur = True, we should repeat the YUNMU to match the 2nd, 3rd... notes. + for idx in range(1, len(note_in_this_word)): + ph_lst.append(ph_in_this_word[-1]) + note_lst.append(note_in_this_word[idx]) + midi_dur_lst.append(midi_dur_in_this_word[idx]) + is_slur.append(1) + ph_seq = ' '.join(ph_lst) + + if len(ph_lst) == len(note_lst) == len(midi_dur_lst): + print(len(ph_lst), len(note_lst), len(midi_dur_lst)) + print('Pass word-notes check.') + else: + print('The number of words does\'t match the number of notes\' windows. ', + 'You should split the note(s) for each word by | mark.') + return None + return ph_seq, note_lst, midi_dur_lst, is_slur + + def preprocess_phoneme_level_input(self, inp): + ph_seq = inp['ph_seq'] + note_lst = inp['note_seq'].split() + midi_dur_lst = inp['note_dur_seq'].split() + is_slur = [float(x) for x in inp['is_slur_seq'].split()] + print(len(note_lst), len(ph_seq.split()), len(midi_dur_lst)) + if len(note_lst) == len(ph_seq.split()) == len(midi_dur_lst): + print('Pass word-notes check.') + else: + print('The number of words does\'t match the number of notes\' windows. ', + 'You should split the note(s) for each word by | mark.') + return None + return ph_seq, note_lst, midi_dur_lst, is_slur + + def preprocess_input(self, inp, input_type='word'): + """ + + :param inp: {'text': str, 'item_name': (str, optional), 'spk_name': (str, optional)} + :return: + """ + + item_name = inp.get('item_name', '') + spk_name = inp.get('spk_name', 'opencpop') + + # single spk + spk_id = self.spk_map[spk_name] + + # get ph seq, note lst, midi dur lst, is slur lst. + if input_type == 'word': + ret = self.preprocess_word_level_input(inp) + elif input_type == 'phoneme': # like transcriptions.txt in Opencpop dataset. 
+ ret = self.preprocess_phoneme_level_input(inp) + else: + print('Invalid input type.') + return None + + if ret: + ph_seq, note_lst, midi_dur_lst, is_slur = ret + else: + print('==========> Preprocess_word_level or phone_level input wrong.') + return None + + # convert note lst to midi id; convert note dur lst to midi duration + try: + midis = [librosa.note_to_midi(x.split("/")[0]) if x != 'rest' else 0 + for x in note_lst] + midi_dur_lst = [float(x) for x in midi_dur_lst] + except Exception as e: + print(e) + print('Invalid Input Type.') + return None + + ph_token = self.ph_encoder.encode(ph_seq) + item = {'item_name': item_name, 'text': inp['text'], 'ph': ph_seq, 'spk_id': spk_id, + 'ph_token': ph_token, 'pitch_midi': np.asarray(midis), 'midi_dur': np.asarray(midi_dur_lst), + 'is_slur': np.asarray(is_slur), } + item['ph_len'] = len(item['ph_token']) + return item + + def input_to_batch(self, item): + item_names = [item['item_name']] + text = [item['text']] + ph = [item['ph']] + txt_tokens = torch.LongTensor(item['ph_token'])[None, :].to(self.device) + txt_lengths = torch.LongTensor([txt_tokens.shape[1]]).to(self.device) + spk_ids = torch.LongTensor(item['spk_id'])[None, :].to(self.device) + + pitch_midi = torch.LongTensor(item['pitch_midi'])[None, :hparams['max_frames']].to(self.device) + midi_dur = torch.FloatTensor(item['midi_dur'])[None, :hparams['max_frames']].to(self.device) + is_slur = torch.LongTensor(item['is_slur'])[None, :hparams['max_frames']].to(self.device) + + batch = { + 'item_name': item_names, + 'text': text, + 'ph': ph, + 'txt_tokens': txt_tokens, + 'txt_lengths': txt_lengths, + 'spk_ids': spk_ids, + 'pitch_midi': pitch_midi, + 'midi_dur': midi_dur, + 'is_slur': is_slur + } + return batch + + def postprocess_output(self, output): + return output + + def infer_once(self, inp): + inp = self.preprocess_input(inp, input_type=inp['input_type'] if inp.get('input_type') else 'word') + output = self.forward_model(inp) + output = self.postprocess_output(output) + return output + + @classmethod + def example_run(cls, inp): + from utils.audio import save_wav + set_hparams(print_hparams=False) + infer_ins = cls(hparams) + out = infer_ins.infer_once(inp) + os.makedirs('infer_out', exist_ok=True) + save_wav(out, f'infer_out/example_out.wav', hparams['audio_sample_rate']) + + +# if __name__ == '__main__': + # debug + # a = BaseSVSInfer(hparams) + # a.preprocess_input({'text': '你 说 你 不 SP 懂 为 何 在 这 时 牵 手 AP', + # 'notes': 'D#4/Eb4 | D#4/Eb4 | D#4/Eb4 | D#4/Eb4 | rest | D#4/Eb4 | D4 | D4 | D4 | D#4/Eb4 | F4 | D#4/Eb4 | D4 | rest', + # 'notes_duration': '0.113740 | 0.329060 | 0.287950 | 0.133480 | 0.150900 | 0.484730 | 0.242010 | 0.180820 | 0.343570 | 0.152050 | 0.266720 | 0.280310 | 0.633300 | 0.444590' + # }) + + # b = { + # 'text': '小酒窝长睫毛AP是你最美的记号', + # 'notes': 'C#4/Db4 | F#4/Gb4 | G#4/Ab4 | A#4/Bb4 F#4/Gb4 | F#4/Gb4 C#4/Db4 | C#4/Db4 | rest | C#4/Db4 | A#4/Bb4 | G#4/Ab4 | A#4/Bb4 | G#4/Ab4 | F4 | C#4/Db4', + # 'notes_duration': '0.407140 | 0.376190 | 0.242180 | 0.509550 0.183420 | 0.315400 0.235020 | 0.361660 | 0.223070 | 0.377270 | 0.340550 | 0.299620 | 0.344510 | 0.283770 | 0.323390 | 0.360340' + # } + # c = { + # 'text': '小酒窝长睫毛AP是你最美的记号', + # 'ph_seq': 'x iao j iu w o ch ang ang j ie ie m ao AP sh i n i z ui m ei d e j i h ao', + # 'note_seq': 'C#4/Db4 C#4/Db4 F#4/Gb4 F#4/Gb4 G#4/Ab4 G#4/Ab4 A#4/Bb4 A#4/Bb4 F#4/Gb4 F#4/Gb4 F#4/Gb4 C#4/Db4 C#4/Db4 C#4/Db4 rest C#4/Db4 C#4/Db4 A#4/Bb4 A#4/Bb4 G#4/Ab4 G#4/Ab4 A#4/Bb4 A#4/Bb4 G#4/Ab4 G#4/Ab4 F4 F4 C#4/Db4 C#4/Db4', + # 
'note_dur_seq': '0.407140 0.407140 0.376190 0.376190 0.242180 0.242180 0.509550 0.509550 0.183420 0.315400 0.315400 0.235020 0.361660 0.361660 0.223070 0.377270 0.377270 0.340550 0.340550 0.299620 0.299620 0.344510 0.344510 0.283770 0.283770 0.323390 0.323390 0.360340 0.360340',
+    # 'is_slur_seq': '0 0 0 0 0 0 0 0 1 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0'
+    # } # input like Opencpop dataset.
+    # a.preprocess_input(b)
+    # a.preprocess_input(c, input_type='phoneme')
\ No newline at end of file
diff --git a/NeuralSeq/inference/svs/ds_cascade.py b/NeuralSeq/inference/svs/ds_cascade.py
new file mode 100644
index 0000000000000000000000000000000000000000..f0ec5ede64d93ef1d182ec42ca36f1f1bb920360
--- /dev/null
+++ b/NeuralSeq/inference/svs/ds_cascade.py
@@ -0,0 +1,54 @@
+import torch
+from inference.svs.base_svs_infer import BaseSVSInfer
+from utils import load_ckpt
+from utils.hparams import hparams
+from modules.diff.shallow_diffusion_tts import GaussianDiffusion
+from tasks.svs.diffsinger_task import DIFF_DECODERS
+
+class DiffSingerCascadeInfer(BaseSVSInfer):
+    def build_model(self):
+        model = GaussianDiffusion(
+            phone_encoder=self.ph_encoder,
+            out_dims=hparams['audio_num_mel_bins'], denoise_fn=DIFF_DECODERS[hparams['diff_decoder_type']](hparams),
+            timesteps=hparams['timesteps'],
+            K_step=hparams['K_step'],
+            loss_type=hparams['diff_loss_type'],
+            spec_min=hparams['spec_min'], spec_max=hparams['spec_max'],
+        )
+        model.eval()
+        load_ckpt(model, hparams['work_dir'], 'model')
+        return model
+
+    def forward_model(self, inp):
+        sample = self.input_to_batch(inp)
+        txt_tokens = sample['txt_tokens'] # [B, T_t]
+        spk_id = sample.get('spk_ids')
+        with torch.no_grad():
+            output = self.model(txt_tokens, spk_id=spk_id, ref_mels=None, infer=True,
+                                pitch_midi=sample['pitch_midi'], midi_dur=sample['midi_dur'],
+                                is_slur=sample['is_slur'])
+            mel_out = output['mel_out'] # [B, T,80]
+            f0_pred = output['f0_denorm']
+            wav_out = self.run_vocoder(mel_out, f0=f0_pred)
+        wav_out = wav_out.cpu().numpy()
+        return wav_out[0]
+
+
+if __name__ == '__main__':
+    inp = {
+        'text': '小酒窝长睫毛AP是你最美的记号',
+        'notes': 'C#4/Db4 | F#4/Gb4 | G#4/Ab4 | A#4/Bb4 F#4/Gb4 | F#4/Gb4 C#4/Db4 | C#4/Db4 | rest | C#4/Db4 | A#4/Bb4 | G#4/Ab4 | A#4/Bb4 | G#4/Ab4 | F4 | C#4/Db4',
+        'notes_duration': '0.407140 | 0.376190 | 0.242180 | 0.509550 0.183420 | 0.315400 0.235020 | 0.361660 | 0.223070 | 0.377270 | 0.340550 | 0.299620 | 0.344510 | 0.283770 | 0.323390 | 0.360340',
+        'input_type': 'word'
+    } # user input: Chinese characters
+    c = {
+        'text': '小酒窝长睫毛AP是你最美的记号',
+        'ph_seq': 'x iao j iu w o ch ang ang j ie ie m ao AP sh i n i z ui m ei d e j i h ao',
+        'note_seq': 'C#4/Db4 C#4/Db4 F#4/Gb4 F#4/Gb4 G#4/Ab4 G#4/Ab4 A#4/Bb4 A#4/Bb4 F#4/Gb4 F#4/Gb4 F#4/Gb4 C#4/Db4 C#4/Db4 C#4/Db4 rest C#4/Db4 C#4/Db4 A#4/Bb4 A#4/Bb4 G#4/Ab4 G#4/Ab4 A#4/Bb4 A#4/Bb4 G#4/Ab4 G#4/Ab4 F4 F4 C#4/Db4 C#4/Db4',
+        'note_dur_seq': '0.407140 0.407140 0.376190 0.376190 0.242180 0.242180 0.509550 0.509550 0.183420 0.315400 0.315400 0.235020 0.361660 0.361660 0.223070 0.377270 0.377270 0.340550 0.340550 0.299620 0.299620 0.344510 0.344510 0.283770 0.283770 0.323390 0.323390 0.360340 0.360340',
+        'is_slur_seq': '0 0 0 0 0 0 0 0 1 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0',
+        'input_type': 'phoneme'
+    } # input like Opencpop dataset.
+ DiffSingerCascadeInfer.example_run(inp) + +# # CUDA_VISIBLE_DEVICES=1 python inference/svs/ds_cascade.py --config egs/egs_bases/svs/midi/cascade/opencs/ds60_rel.yaml --exp_name 0303_opencpop_ds58_midi \ No newline at end of file diff --git a/NeuralSeq/inference/svs/ds_e2e.py b/NeuralSeq/inference/svs/ds_e2e.py new file mode 100644 index 0000000000000000000000000000000000000000..7d69d49eff8076f97933538a1d1a59d80b553dd8 --- /dev/null +++ b/NeuralSeq/inference/svs/ds_e2e.py @@ -0,0 +1,67 @@ +import torch +# from inference.tts.fs import FastSpeechInfer +# from modules.tts.fs2_orig import FastSpeech2Orig +from inference.svs.base_svs_infer import BaseSVSInfer +from utils import load_ckpt +from utils.hparams import hparams +from modules.diff.shallow_diffusion_tts import GaussianDiffusion +from tasks.svs.diffsinger_task import DIFF_DECODERS +from modules.fastspeech.pe import PitchExtractor +import utils + + +class DiffSingerE2EInfer(BaseSVSInfer): + def build_model(self): + model = GaussianDiffusion( + phone_encoder=self.ph_encoder, + out_dims=hparams['audio_num_mel_bins'], denoise_fn=DIFF_DECODERS[hparams['diff_decoder_type']](hparams), + timesteps=hparams['timesteps'], + K_step=hparams['K_step'], + loss_type=hparams['diff_loss_type'], + spec_min=hparams['spec_min'], spec_max=hparams['spec_max'], + ) + model.eval() + load_ckpt(model, hparams['work_dir'], 'model') + + if hparams.get('pe_enable') is not None and hparams['pe_enable']: + self.pe = PitchExtractor().to(self.device) + utils.load_ckpt(self.pe, hparams['pe_ckpt'], 'model', strict=True) + self.pe.eval() + return model + + def forward_model(self, inp): + sample = self.input_to_batch(inp) + txt_tokens = sample['txt_tokens'] # [B, T_t] + spk_id = sample.get('spk_ids') + with torch.no_grad(): + output = self.model(txt_tokens, spk_id=spk_id, ref_mels=None, infer=True, + pitch_midi=sample['pitch_midi'], midi_dur=sample['midi_dur'], + is_slur=sample['is_slur']) + mel_out = output['mel_out'] # [B, T,80] + if hparams.get('pe_enable') is not None and hparams['pe_enable']: + f0_pred = self.pe(mel_out)['f0_denorm_pred'] # pe predict from Pred mel + else: + f0_pred = output['f0_denorm'] + wav_out = self.run_vocoder(mel_out, f0=f0_pred) + wav_out = wav_out.cpu().numpy() + return wav_out[0] + +if __name__ == '__main__': + inp = { + 'text': '小酒窝长睫毛AP是你最美的记号', + 'notes': 'C#4/Db4 | F#4/Gb4 | G#4/Ab4 | A#4/Bb4 F#4/Gb4 | F#4/Gb4 C#4/Db4 | C#4/Db4 | rest | C#4/Db4 | A#4/Bb4 | G#4/Ab4 | A#4/Bb4 | G#4/Ab4 | F4 | C#4/Db4', + 'notes_duration': '0.407140 | 0.376190 | 0.242180 | 0.509550 0.183420 | 0.315400 0.235020 | 0.361660 | 0.223070 | 0.377270 | 0.340550 | 0.299620 | 0.344510 | 0.283770 | 0.323390 | 0.360340', + 'input_type': 'word' + } # user input: Chinese characters + inp = { + 'text': '小酒窝长睫毛AP是你最美的记号', + 'ph_seq': 'x iao j iu w o ch ang ang j ie ie m ao AP sh i n i z ui m ei d e j i h ao', + 'note_seq': 'C#4/Db4 C#4/Db4 F#4/Gb4 F#4/Gb4 G#4/Ab4 G#4/Ab4 A#4/Bb4 A#4/Bb4 F#4/Gb4 F#4/Gb4 F#4/Gb4 C#4/Db4 C#4/Db4 C#4/Db4 rest C#4/Db4 C#4/Db4 A#4/Bb4 A#4/Bb4 G#4/Ab4 G#4/Ab4 A#4/Bb4 A#4/Bb4 G#4/Ab4 G#4/Ab4 F4 F4 C#4/Db4 C#4/Db4', + 'note_dur_seq': '0.407140 0.407140 0.376190 0.376190 0.242180 0.242180 0.509550 0.509550 0.183420 0.315400 0.315400 0.235020 0.361660 0.361660 0.223070 0.377270 0.377270 0.340550 0.340550 0.299620 0.299620 0.344510 0.344510 0.283770 0.283770 0.323390 0.323390 0.360340 0.360340', + 'is_slur_seq': '0 0 0 0 0 0 0 0 1 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0', + 'input_type': 'phoneme' + } # input like Opencpop dataset. 
+ DiffSingerE2EInfer.example_run(inp) + + +# CUDA_VISIBLE_DEVICES=3 python inference/svs/ds_e2e.py --config egs/egs_bases/svs/midi/e2e/opencpop/ds100_adj_rel.yaml --exp_name 0228_opencpop_ds100_rel \ No newline at end of file diff --git a/NeuralSeq/inference/svs/opencpop/__pycache__/map.cpython-37.pyc b/NeuralSeq/inference/svs/opencpop/__pycache__/map.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..14823648fa27268f7ff4c3cbcd86d20f24793f64 Binary files /dev/null and b/NeuralSeq/inference/svs/opencpop/__pycache__/map.cpython-37.pyc differ diff --git a/NeuralSeq/inference/svs/opencpop/__pycache__/map.cpython-38.pyc b/NeuralSeq/inference/svs/opencpop/__pycache__/map.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..72ba74565a97f2ecddfa17ffb6449e574bddac57 Binary files /dev/null and b/NeuralSeq/inference/svs/opencpop/__pycache__/map.cpython-38.pyc differ diff --git a/NeuralSeq/inference/svs/opencpop/cpop_pinyin2ph.txt b/NeuralSeq/inference/svs/opencpop/cpop_pinyin2ph.txt new file mode 100644 index 0000000000000000000000000000000000000000..fed726000048c318056098a663eff83afd323bc9 --- /dev/null +++ b/NeuralSeq/inference/svs/opencpop/cpop_pinyin2ph.txt @@ -0,0 +1,418 @@ +| a | a | +| ai | ai | +| an | an | +| ang | ang | +| ao | ao | +| ba | b a | +| bai | b ai | +| ban | b an | +| bang | b ang | +| bao | b ao | +| bei | b ei | +| ben | b en | +| beng | b eng | +| bi | b i | +| bian | b ian | +| biao | b iao | +| bie | b ie | +| bin | b in | +| bing | b ing | +| bo | b o | +| bu | b u | +| ca | c a | +| cai | c ai | +| can | c an | +| cang | c ang | +| cao | c ao | +| ce | c e | +| cei | c ei | +| cen | c en | +| ceng | c eng | +| cha | ch a | +| chai | ch ai | +| chan | ch an | +| chang | ch ang | +| chao | ch ao | +| che | ch e | +| chen | ch en | +| cheng | ch eng | +| chi | ch i | +| chong | ch ong | +| chou | ch ou | +| chu | ch u | +| chua | ch ua | +| chuai | ch uai | +| chuan | ch uan | +| chuang | ch uang | +| chui | ch ui | +| chun | ch un | +| chuo | ch uo | +| ci | c i | +| cong | c ong | +| cou | c ou | +| cu | c u | +| cuan | c uan | +| cui | c ui | +| cun | c un | +| cuo | c uo | +| da | d a | +| dai | d ai | +| dan | d an | +| dang | d ang | +| dao | d ao | +| de | d e | +| dei | d ei | +| den | d en | +| deng | d eng | +| di | d i | +| dia | d ia | +| dian | d ian | +| diao | d iao | +| die | d ie | +| ding | d ing | +| diu | d iu | +| dong | d ong | +| dou | d ou | +| du | d u | +| duan | d uan | +| dui | d ui | +| dun | d un | +| duo | d uo | +| e | e | +| ei | ei | +| en | en | +| eng | eng | +| er | er | +| fa | f a | +| fan | f an | +| fang | f ang | +| fei | f ei | +| fen | f en | +| feng | f eng | +| fo | f o | +| fou | f ou | +| fu | f u | +| ga | g a | +| gai | g ai | +| gan | g an | +| gang | g ang | +| gao | g ao | +| ge | g e | +| gei | g ei | +| gen | g en | +| geng | g eng | +| gong | g ong | +| gou | g ou | +| gu | g u | +| gua | g ua | +| guai | g uai | +| guan | g uan | +| guang | g uang | +| gui | g ui | +| gun | g un | +| guo | g uo | +| ha | h a | +| hai | h ai | +| han | h an | +| hang | h ang | +| hao | h ao | +| he | h e | +| hei | h ei | +| hen | h en | +| heng | h eng | +| hm | h m | +| hng | h ng | +| hong | h ong | +| hou | h ou | +| hu | h u | +| hua | h ua | +| huai | h uai | +| huan | h uan | +| huang | h uang | +| hui | h ui | +| hun | h un | +| huo | h uo | +| ji | j i | +| jia | j ia | +| jian | j ian | +| jiang | j iang | +| jiao | j iao | +| jie | j ie | +| jin | j in | +| jing 
| j ing | +| jiong | j iong | +| jiu | j iu | +| ju | j v | +| juan | j van | +| jue | j ve | +| jun | j vn | +| ka | k a | +| kai | k ai | +| kan | k an | +| kang | k ang | +| kao | k ao | +| ke | k e | +| kei | k ei | +| ken | k en | +| keng | k eng | +| kong | k ong | +| kou | k ou | +| ku | k u | +| kua | k ua | +| kuai | k uai | +| kuan | k uan | +| kuang | k uang | +| kui | k ui | +| kun | k un | +| kuo | k uo | +| la | l a | +| lai | l ai | +| lan | l an | +| lang | l ang | +| lao | l ao | +| le | l e | +| lei | l ei | +| leng | l eng | +| li | l i | +| lia | l ia | +| lian | l ian | +| liang | l iang | +| liao | l iao | +| lie | l ie | +| lin | l in | +| ling | l ing | +| liu | l iu | +| lo | l o | +| long | l ong | +| lou | l ou | +| lu | l u | +| luan | l uan | +| lun | l un | +| luo | l uo | +| lv | l v | +| lve | l ve | +| m | m | +| ma | m a | +| mai | m ai | +| man | m an | +| mang | m ang | +| mao | m ao | +| me | m e | +| mei | m ei | +| men | m en | +| meng | m eng | +| mi | m i | +| mian | m ian | +| miao | m iao | +| mie | m ie | +| min | m in | +| ming | m ing | +| miu | m iu | +| mo | m o | +| mou | m ou | +| mu | m u | +| n | n | +| na | n a | +| nai | n ai | +| nan | n an | +| nang | n ang | +| nao | n ao | +| ne | n e | +| nei | n ei | +| nen | n en | +| neng | n eng | +| ng | n g | +| ni | n i | +| nian | n ian | +| niang | n iang | +| niao | n iao | +| nie | n ie | +| nin | n in | +| ning | n ing | +| niu | n iu | +| nong | n ong | +| nou | n ou | +| nu | n u | +| nuan | n uan | +| nun | n un | +| nuo | n uo | +| nv | n v | +| nve | n ve | +| o | o | +| ou | ou | +| pa | p a | +| pai | p ai | +| pan | p an | +| pang | p ang | +| pao | p ao | +| pei | p ei | +| pen | p en | +| peng | p eng | +| pi | p i | +| pian | p ian | +| piao | p iao | +| pie | p ie | +| pin | p in | +| ping | p ing | +| po | p o | +| pou | p ou | +| pu | p u | +| qi | q i | +| qia | q ia | +| qian | q ian | +| qiang | q iang | +| qiao | q iao | +| qie | q ie | +| qin | q in | +| qing | q ing | +| qiong | q iong | +| qiu | q iu | +| qu | q v | +| quan | q van | +| que | q ve | +| qun | q vn | +| ran | r an | +| rang | r ang | +| rao | r ao | +| re | r e | +| ren | r en | +| reng | r eng | +| ri | r i | +| rong | r ong | +| rou | r ou | +| ru | r u | +| rua | r ua | +| ruan | r uan | +| rui | r ui | +| run | r un | +| ruo | r uo | +| sa | s a | +| sai | s ai | +| san | s an | +| sang | s ang | +| sao | s ao | +| se | s e | +| sen | s en | +| seng | s eng | +| sha | sh a | +| shai | sh ai | +| shan | sh an | +| shang | sh ang | +| shao | sh ao | +| she | sh e | +| shei | sh ei | +| shen | sh en | +| sheng | sh eng | +| shi | sh i | +| shou | sh ou | +| shu | sh u | +| shua | sh ua | +| shuai | sh uai | +| shuan | sh uan | +| shuang | sh uang | +| shui | sh ui | +| shun | sh un | +| shuo | sh uo | +| si | s i | +| song | s ong | +| sou | s ou | +| su | s u | +| suan | s uan | +| sui | s ui | +| sun | s un | +| suo | s uo | +| ta | t a | +| tai | t ai | +| tan | t an | +| tang | t ang | +| tao | t ao | +| te | t e | +| tei | t ei | +| teng | t eng | +| ti | t i | +| tian | t ian | +| tiao | t iao | +| tie | t ie | +| ting | t ing | +| tong | t ong | +| tou | t ou | +| tu | t u | +| tuan | t uan | +| tui | t ui | +| tun | t un | +| tuo | t uo | +| wa | w a | +| wai | w ai | +| wan | w an | +| wang | w ang | +| wei | w ei | +| wen | w en | +| weng | w eng | +| wo | w o | +| wu | w u | +| xi | x i | +| xia | x ia | +| xian | x ian | +| xiang | x iang | +| xiao | x iao | +| xie | x ie | +| xin | x in 
| +| xing | x ing | +| xiong | x iong | +| xiu | x iu | +| xu | x v | +| xuan | x van | +| xue | x ve | +| xun | x vn | +| ya | y a | +| yan | y an | +| yang | y ang | +| yao | y ao | +| ye | y e | +| yi | y i | +| yin | y in | +| ying | y ing | +| yo | y o | +| yong | y ong | +| you | y ou | +| yu | y v | +| yuan | y van | +| yue | y ve | +| yun | y vn | +| za | z a | +| zai | z ai | +| zan | z an | +| zang | z ang | +| zao | z ao | +| ze | z e | +| zei | z ei | +| zen | z en | +| zeng | z eng | +| zha | zh a | +| zhai | zh ai | +| zhan | zh an | +| zhang | zh ang | +| zhao | zh ao | +| zhe | zh e | +| zhei | zh ei | +| zhen | zh en | +| zheng | zh eng | +| zhi | zh i | +| zhong | zh ong | +| zhou | zh ou | +| zhu | zh u | +| zhua | zh ua | +| zhuai | zh uai | +| zhuan | zh uan | +| zhuang | zh uang | +| zhui | zh ui | +| zhun | zh un | +| zhuo | zh uo | +| zi | z i | +| zong | z ong | +| zou | z ou | +| zu | z u | +| zuan | z uan | +| zui | z ui | +| zun | z un | +| zuo | z uo | \ No newline at end of file diff --git a/NeuralSeq/inference/svs/opencpop/map.py b/NeuralSeq/inference/svs/opencpop/map.py new file mode 100644 index 0000000000000000000000000000000000000000..cf09dbfa25378e8cb098e57c3143f9d66ae00391 --- /dev/null +++ b/NeuralSeq/inference/svs/opencpop/map.py @@ -0,0 +1,8 @@ +def cpop_pinyin2ph_func(): + # In the README file of opencpop dataset, they defined a "pinyin to phoneme mapping table" + pinyin2phs = {'AP': 'AP', 'SP': 'SP'} + with open('NeuralSeq/inference/svs/opencpop/cpop_pinyin2ph.txt') as rf: + for line in rf.readlines(): + elements = [x.strip() for x in line.split('|') if x.strip() != ''] + pinyin2phs[elements[0]] = elements[1] + return pinyin2phs \ No newline at end of file diff --git a/NeuralSeq/inference/tts/GenerSpeech.py b/NeuralSeq/inference/tts/GenerSpeech.py new file mode 100644 index 0000000000000000000000000000000000000000..50527337e218248c763cf7504bc13814cca0696c --- /dev/null +++ b/NeuralSeq/inference/tts/GenerSpeech.py @@ -0,0 +1,123 @@ +import torch +import os +import importlib +from inference.tts.base_tts_infer import BaseTTSInfer +from utils.ckpt_utils import load_ckpt, get_last_checkpoint +from modules.GenerSpeech.model.generspeech import GenerSpeech +from data_gen.tts.emotion import inference as EmotionEncoder +from data_gen.tts.emotion.inference import embed_utterance as Embed_utterance +from data_gen.tts.emotion.inference import preprocess_wav +from data_gen.tts.data_gen_utils import is_sil_phoneme +from resemblyzer import VoiceEncoder +from utils import audio +class GenerSpeechInfer(BaseTTSInfer): + def build_model(self): + model = GenerSpeech(self.ph_encoder) + model.eval() + load_ckpt(model, self.hparams['work_dir'], 'model') + return model + + def preprocess_input(self, inp): + """ + :param inp: {'text': str, 'item_name': (str, optional), 'spk_name': (str, optional)} + :return: + """ + # processed text + preprocessor, preprocess_args = self.preprocessor, self.preprocess_args + text_raw = inp['text'] + item_name = inp.get('item_name', '') + ph, txt, word, ph2word, ph_gb_word = preprocessor.txt_to_ph(preprocessor.txt_processor, text_raw, preprocess_args) + ph_token = self.ph_encoder.encode(ph) + + # processed ref audio + ref_audio = inp['ref_audio'] + processed_ref_audio = 'example/temp.wav' + voice_encoder = VoiceEncoder().cuda() + encoder = [self.ph_encoder, self.word_encoder] + EmotionEncoder.load_model(self.hparams['emotion_encoder_path']) + binarizer_cls = self.hparams.get("binarizer_cls", 'data_gen.tts.base_binarizerr.BaseBinarizer') + pkg 
= ".".join(binarizer_cls.split(".")[:-1]) + cls_name = binarizer_cls.split(".")[-1] + binarizer_cls = getattr(importlib.import_module(pkg), cls_name) + + ref_audio_raw, ref_text_raw = self.asr(ref_audio) # prepare text + ph_ref, txt_ref, word_ref, ph2word_ref, ph_gb_word_ref = preprocessor.txt_to_ph(preprocessor.txt_processor, ref_text_raw, preprocess_args) + ph_gb_word_nosil = ["_".join([p for p in w.split("_") if not is_sil_phoneme(p)]) for w in ph_gb_word_ref.split(" ") if not is_sil_phoneme(w)] + phs_for_align = ['SIL'] + ph_gb_word_nosil + ['SIL'] + phs_for_align = " ".join(phs_for_align) + + # prepare files for alignment + os.system('rm -r example/; mkdir example/') + audio.save_wav(ref_audio_raw, processed_ref_audio, self.hparams['audio_sample_rate']) + with open(f'example/temp.lab', 'w') as f_txt: + f_txt.write(phs_for_align) + os.system(f'mfa align example/ {self.hparams["binary_data_dir"]}/mfa_dict.txt {self.hparams["binary_data_dir"]}/mfa_model.zip example/textgrid/ --clean') + item2tgfn = 'example/textgrid/temp.TextGrid' # prepare textgrid alignment + + item = binarizer_cls.process_item(item_name, ph_ref, txt_ref, item2tgfn, processed_ref_audio, 0, 0, encoder, self.hparams['binarization_args']) + item['emo_embed'] = Embed_utterance(preprocess_wav(item['wav_fn'])) + item['spk_embed'] = voice_encoder.embed_utterance(item['wav']) + + item.update({ + 'ref_ph': item['ph'], + 'ph': ph, + 'ph_token': ph_token, + 'text': txt + }) + return item + + def input_to_batch(self, item): + item_names = [item['item_name']] + text = [item['text']] + ph = [item['ph']] + + txt_tokens = torch.LongTensor(item['ph_token'])[None, :].to(self.device) + txt_lengths = torch.LongTensor([txt_tokens.shape[1]]).to(self.device) + mels = torch.FloatTensor(item['mel'])[None, :].to(self.device) + f0 = torch.FloatTensor(item['f0'])[None, :].to(self.device) + # uv = torch.FloatTensor(item['uv']).to(self.device) + mel2ph = torch.LongTensor(item['mel2ph'])[None, :].to(self.device) + spk_embed = torch.FloatTensor(item['spk_embed'])[None, :].to(self.device) + emo_embed = torch.FloatTensor(item['emo_embed'])[None, :].to(self.device) + + ph2word = torch.LongTensor(item['ph2word'])[None, :].to(self.device) + mel2word = torch.LongTensor(item['mel2word'])[None, :].to(self.device) + word_tokens = torch.LongTensor(item['word_tokens'])[None, :].to(self.device) + + batch = { + 'item_name': item_names, + 'text': text, + 'ph': ph, + 'mels': mels, + 'f0': f0, + 'txt_tokens': txt_tokens, + 'txt_lengths': txt_lengths, + 'spk_embed': spk_embed, + 'emo_embed': emo_embed, + 'mel2ph': mel2ph, + 'ph2word': ph2word, + 'mel2word': mel2word, + 'word_tokens': word_tokens, + } + return batch + + def forward_model(self, inp): + sample = self.input_to_batch(inp) + txt_tokens = sample['txt_tokens'] # [B, T_t] + with torch.no_grad(): + output = self.model(txt_tokens, ref_mel2ph=sample['mel2ph'], ref_mel2word=sample['mel2word'], ref_mels=sample['mels'], + spk_embed=sample['spk_embed'], emo_embed=sample['emo_embed'], global_steps=300000, infer=True) + mel_out = output['mel_out'] + wav_out = self.run_vocoder(mel_out) + wav_out = wav_out.squeeze().cpu().numpy() + return wav_out + + + + +if __name__ == '__main__': + inp = { + 'text': 'here we go', + 'ref_audio': 'assets/0011_001570.wav' + } + GenerSpeechInfer.example_run(inp) diff --git a/NeuralSeq/inference/tts/PortaSpeech.py b/NeuralSeq/inference/tts/PortaSpeech.py new file mode 100644 index 0000000000000000000000000000000000000000..1e0222284a60f7de0142851ff7ad2bf26fb5ea76 --- /dev/null +++ 
b/NeuralSeq/inference/tts/PortaSpeech.py @@ -0,0 +1,85 @@ +import torch +from inference.tts.base_tts_infer import BaseTTSInfer +from utils.ckpt_utils import load_ckpt +from modules.portaspeech.portaspeech import PortaSpeech + +class TTSInference(BaseTTSInfer): + def __init__(self, hparams, device=None): + super().__init__(hparams, device) + print("Initializing TTS model to %s" % device) + self.spk_map = self.preprocessor.load_spk_map(self.data_dir) + print("TTS loaded!") + + def build_model(self): + model = PortaSpeech(self.ph_encoder, self.word_encoder) + load_ckpt(model, self.hparams['work_dir'], 'model') + with torch.no_grad(): + model.store_inverse_all() + return model + + def forward_model(self, inp): + sample = self.input_to_batch(inp) + with torch.no_grad(): + output = self.model( + sample['txt_tokens'], + sample['word_tokens'], + ph2word=sample['ph2word'], + word_len=sample['word_lengths'].max(), + infer=True, + forward_post_glow=True, + spk_id=sample.get('spk_ids') + ) + mel_out = output['mel_out'] + wav_out = self.run_vocoder(mel_out) + wav_out = wav_out.cpu().numpy() + return wav_out[0] + + def preprocess_input(self, inp): + """ + + :param inp: {'text': str, 'item_name': (str, optional), 'spk_name': (str, optional)} + :return: + """ + preprocessor, preprocess_args = self.preprocessor, self.preprocess_args + text_raw = inp['text'] + item_name = inp.get('item_name', '') + spk_name = inp.get('spk_name', '') + ph, txt, word, ph2word, ph_gb_word = preprocessor.txt_to_ph( + preprocessor.txt_processor, text_raw, preprocess_args) + word_token = self.word_encoder.encode(word) + ph_token = self.ph_encoder.encode(ph) + spk_id = self.spk_map[spk_name] + item = {'item_name': item_name, 'text': txt, 'ph': ph, 'spk_id': spk_id, + 'ph_token': ph_token, 'word_token': word_token, 'ph2word': ph2word, + 'ph_words':ph_gb_word, 'words': word} + item['ph_len'] = len(item['ph_token']) + return item + + def input_to_batch(self, item): + item_names = [item['item_name']] + text = [item['text']] + ph = [item['ph']] + txt_tokens = torch.LongTensor(item['ph_token'])[None, :].to(self.device) + txt_lengths = torch.LongTensor([txt_tokens.shape[1]]).to(self.device) + word_tokens = torch.LongTensor(item['word_token'])[None, :].to(self.device) + word_lengths = torch.LongTensor([txt_tokens.shape[1]]).to(self.device) + ph2word = torch.LongTensor(item['ph2word'])[None, :].to(self.device) + spk_ids = torch.LongTensor(item['spk_id'])[None, :].to(self.device) + batch = { + 'item_name': item_names, + 'text': text, + 'ph': ph, + 'txt_tokens': txt_tokens, + 'txt_lengths': txt_lengths, + 'word_tokens': word_tokens, + 'word_lengths': word_lengths, + 'ph2word': ph2word, + 'spk_ids': spk_ids, + } + return batch + + def postprocess_output(self, output): + return output + + + diff --git a/NeuralSeq/inference/tts/__pycache__/GenerSpeech.cpython-38.pyc b/NeuralSeq/inference/tts/__pycache__/GenerSpeech.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b4d9ccd43d69f738def8bffbb7970775e9dc4612 Binary files /dev/null and b/NeuralSeq/inference/tts/__pycache__/GenerSpeech.cpython-38.pyc differ diff --git a/NeuralSeq/inference/tts/__pycache__/base_tts_infer.cpython-38.pyc b/NeuralSeq/inference/tts/__pycache__/base_tts_infer.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c301cff551d4e284cd908dcf4e10beb725c7cf7b Binary files /dev/null and b/NeuralSeq/inference/tts/__pycache__/base_tts_infer.cpython-38.pyc differ diff --git a/NeuralSeq/inference/tts/base_tts_infer.py 
b/NeuralSeq/inference/tts/base_tts_infer.py new file mode 100644 index 0000000000000000000000000000000000000000..6478f910b0dea7bd2bfac85722c775c181a0f826 --- /dev/null +++ b/NeuralSeq/inference/tts/base_tts_infer.py @@ -0,0 +1,101 @@ +from tasks.tts.dataset_utils import FastSpeechWordDataset +from tasks.tts.tts_utils import load_data_preprocessor +from vocoders.hifigan import HifiGanGenerator +import os +import librosa +import soundfile as sf +from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor +from string import punctuation +import torch +from utils.ckpt_utils import load_ckpt +from utils.hparams import set_hparams +from utils.hparams import hparams as hp + +class BaseTTSInfer: + def __init__(self, hparams, device=None): + if device is None: + device = 'cuda' if torch.cuda.is_available() else 'cpu' + self.hparams = hparams + self.device = device + self.data_dir = hparams['binary_data_dir'] + self.preprocessor, self.preprocess_args = load_data_preprocessor() + self.ph_encoder, self.word_encoder = self.preprocessor.load_dict(self.data_dir) + self.ds_cls = FastSpeechWordDataset + self.model = self.build_model() + self.model.eval() + self.model.to(self.device) + self.vocoder = self.build_vocoder() + self.vocoder.eval() + self.vocoder.to(self.device) + self.asr_processor, self.asr_model = self.build_asr() + + def build_model(self): + raise NotImplementedError + + def forward_model(self, inp): + raise NotImplementedError + + def build_asr(self): + # load pretrained model + processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h") # facebook/wav2vec2-base-960h wav2vec2-large-960h-lv60-self + model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h").to(self.device) + return processor, model + + def build_vocoder(self): + base_dir = self.hparams['vocoder_ckpt'] + config_path = f'{base_dir}/config.yaml' + config = set_hparams(config_path, global_hparams=False) + vocoder = HifiGanGenerator(config) + load_ckpt(vocoder, base_dir, 'model_gen') + return vocoder + + def run_vocoder(self, c): + c = c.transpose(2, 1) + y = self.vocoder(c)[:, 0] + return y + + def preprocess_input(self, inp): + raise NotImplementedError + + def input_to_batch(self, item): + raise NotImplementedError + + def postprocess_output(self, output): + return output + + def infer_once(self, inp): + inp = self.preprocess_input(inp) + output = self.forward_model(inp) + output = self.postprocess_output(output) + return output + + @classmethod + def example_run(cls, inp): + from utils.audio import save_wav + + #set_hparams(print_hparams=False) + infer_ins = cls(hp) + out = infer_ins.infer_once(inp) + os.makedirs('infer_out', exist_ok=True) + save_wav(out, f'infer_out/{hp["text"]}.wav', hp['audio_sample_rate']) + print(f'Save at infer_out/{hp["text"]}.wav.') + + def asr(self, file): + sample_rate = self.hparams['audio_sample_rate'] + audio_input, source_sample_rate = sf.read(file) + + # Resample the wav if needed + if sample_rate is not None and source_sample_rate != sample_rate: + audio_input = librosa.resample(audio_input, source_sample_rate, sample_rate) + + # pad input values and return pt tensor + input_values = self.asr_processor(audio_input, sampling_rate=sample_rate, return_tensors="pt").input_values + + # retrieve logits & take argmax + logits = self.asr_model(input_values.cuda()).logits + predicted_ids = torch.argmax(logits, dim=-1) + + # transcribe + transcription = self.asr_processor.decode(predicted_ids[0]) + transcription = transcription.rstrip(punctuation) + return audio_input, 
transcription \ No newline at end of file diff --git a/NeuralSeq/modules/GenerSpeech/config/generspeech.yaml b/NeuralSeq/modules/GenerSpeech/config/generspeech.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b81c43a906e979475ff64039974b6a162fba4a5f --- /dev/null +++ b/NeuralSeq/modules/GenerSpeech/config/generspeech.yaml @@ -0,0 +1,101 @@ +base_config: + - egs/egs_bases/tts/fs2.yaml + - egs/datasets/audio/emotion/base_text2mel.yaml + +task_cls: modules.GenerSpeech.task.generspeech.GenerSpeechTask + +# emotion encoder +emotion_encoder_path: checkpoints/Emotion_encoder.pt # set the emotion encoder path + +# vocoder +vocoder: hifigan +vocoder_ckpt: checkpoints/trainset_hifigan + +# dataset +raw_data_dir: 'data/raw/training_set' +processed_data_dir: 'data/processed/training_set' +binary_data_dir: 'data/binary/training_set' +test_input_dir: '' + +# process +binarizer_cls: data_gen.tts.base_binarizer_emotion.EmotionBinarizer +audio_sample_rate: 16000 +hop_size: 256 # For 22050Hz, 275 ~= 12.5 ms (0.0125 * sample_rate) +win_size: 1024 # For 22050Hz, 1100 ~= 50 ms (If None, win_size: fft_size) (0.05 * sample_rate) +fmin: 80 # Set this to 55 if your speaker is male! if female, 95 should help taking off noise. (To test depending on dataset. Pitch info: male~[65, 260], female~[100, 525]) +fmax: 7600 # To be increased/reduced depending on data. +fft_size: 1024 # Extra window size is filled with 0 paddings to match this parameter +min_level_db: -100 +ref_level_db: 20 + +binarization_args: + reset_phone_dict: true + reset_word_dict: true + shuffle: true + trim_eos_bos: false + trim_sil: false + with_align: true + with_f0: true + with_f0cwt: false + with_linear: false + with_spk_embed: true + with_spk_id: true + with_txt: true + with_wav: true + with_word: true + +preprocess_cls: egs.datasets.audio.libritts.pre_align.LibrittsPreAlign +preprocess_args: + nsample_per_mfa_group: 1000 + # text process + txt_processor: en + use_mfa: true + with_phsep: true + reset_phone_dict: true + reset_word_dict: true + add_eos_bos: true + # mfa + mfa_group_shuffle: false + mfa_offset: 0.02 + # wav processors + wav_processors: [] + save_sil_mask: true + vad_max_silence_length: 12 + +# data +word_dict_size: 10000 +num_spk: 500 +use_spk_embed: true +use_spk_id: false +use_word: true +use_emotion: true +use_gt_dur: false +ref_audio: '' +text: '' + +# training +num_sanity_val_steps: -1 +max_updates: 300000 +max_sentences: 100000 +num_test_samples: 72 + +## glow +post_glow_hidden: 128 +post_glow_kernel_size: 3 +post_glow_n_blocks: 8 +post_glow_n_block_layers: 3 +share_wn_layers: 4 +sigmoid_scale: false +post_share_cond_layers: false +use_txt_cond: true +use_latent_cond: true +noise_scale: 0.8 + +# prosody extractor +lambda_commit: 0.25 +vq_start: 20500 +vae_dropout: 0.0 +nVQ: 128 +forcing: 20000 +crop: false +predictor_grad: 1.0 \ No newline at end of file diff --git a/NeuralSeq/modules/GenerSpeech/model/__pycache__/generspeech.cpython-38.pyc b/NeuralSeq/modules/GenerSpeech/model/__pycache__/generspeech.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..155e678c96216b8f392c12c2c1f28cea523c8ff0 Binary files /dev/null and b/NeuralSeq/modules/GenerSpeech/model/__pycache__/generspeech.cpython-38.pyc differ diff --git a/NeuralSeq/modules/GenerSpeech/model/__pycache__/glow_modules.cpython-38.pyc b/NeuralSeq/modules/GenerSpeech/model/__pycache__/glow_modules.cpython-38.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..644cd47735290ac278c0c4e884d212ca6c1d4841 Binary files /dev/null and b/NeuralSeq/modules/GenerSpeech/model/__pycache__/glow_modules.cpython-38.pyc differ diff --git a/NeuralSeq/modules/GenerSpeech/model/__pycache__/mixstyle.cpython-38.pyc b/NeuralSeq/modules/GenerSpeech/model/__pycache__/mixstyle.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..29bd4484cd6cbd7acca6cd82c910f20e17853718 Binary files /dev/null and b/NeuralSeq/modules/GenerSpeech/model/__pycache__/mixstyle.cpython-38.pyc differ diff --git a/NeuralSeq/modules/GenerSpeech/model/__pycache__/prosody_util.cpython-38.pyc b/NeuralSeq/modules/GenerSpeech/model/__pycache__/prosody_util.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5bd5e5b11464c818dc8f6b3824449ee86baf732f Binary files /dev/null and b/NeuralSeq/modules/GenerSpeech/model/__pycache__/prosody_util.cpython-38.pyc differ diff --git a/NeuralSeq/modules/GenerSpeech/model/__pycache__/wavenet.cpython-38.pyc b/NeuralSeq/modules/GenerSpeech/model/__pycache__/wavenet.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7c74e240179e920db0c626ac6e6246839536f09e Binary files /dev/null and b/NeuralSeq/modules/GenerSpeech/model/__pycache__/wavenet.cpython-38.pyc differ diff --git a/NeuralSeq/modules/GenerSpeech/model/generspeech.py b/NeuralSeq/modules/GenerSpeech/model/generspeech.py new file mode 100644 index 0000000000000000000000000000000000000000..d6ee09c417ae8ac8ce6e3f02a60eea36f4f4ba05 --- /dev/null +++ b/NeuralSeq/modules/GenerSpeech/model/generspeech.py @@ -0,0 +1,260 @@ +import torch +from modules.GenerSpeech.model.glow_modules import Glow +from modules.fastspeech.tts_modules import PitchPredictor +import random +from modules.GenerSpeech.model.prosody_util import ProsodyAligner, LocalStyleAdaptor +from utils.pitch_utils import f0_to_coarse, denorm_f0 +from modules.commons.common_layers import * +import torch.distributions as dist +from utils.hparams import hparams +from modules.GenerSpeech.model.mixstyle import MixStyle +from modules.fastspeech.fs2 import FastSpeech2 +import json +from modules.fastspeech.tts_modules import DEFAULT_MAX_SOURCE_POSITIONS, DEFAULT_MAX_TARGET_POSITIONS + +class GenerSpeech(FastSpeech2): + ''' + GenerSpeech: Towards Style Transfer for Generalizable Out-Of-Domain Text-to-Speech + https://arxiv.org/abs/2205.07211 + ''' + def __init__(self, dictionary, out_dims=None): + super().__init__(dictionary, out_dims) + + # Mixstyle + self.norm = MixStyle(p=0.5, alpha=0.1, eps=1e-6, hidden_size=self.hidden_size) + + # emotion embedding + self.emo_embed_proj = Linear(256, self.hidden_size, bias=True) + + # build prosody extractor + ## frame level + self.prosody_extractor_utter = LocalStyleAdaptor(self.hidden_size, hparams['nVQ'], self.padding_idx) + self.l1_utter = nn.Linear(self.hidden_size * 2, self.hidden_size) + self.align_utter = ProsodyAligner(num_layers=2) + + ## phoneme level + self.prosody_extractor_ph = LocalStyleAdaptor(self.hidden_size, hparams['nVQ'], self.padding_idx) + self.l1_ph = nn.Linear(self.hidden_size * 2, self.hidden_size) + self.align_ph = ProsodyAligner(num_layers=2) + + ## word level + self.prosody_extractor_word = LocalStyleAdaptor(self.hidden_size, hparams['nVQ'], self.padding_idx) + self.l1_word = nn.Linear(self.hidden_size * 2, self.hidden_size) + self.align_word = ProsodyAligner(num_layers=2) + + self.pitch_inpainter_predictor = PitchPredictor( + self.hidden_size, n_chans=self.hidden_size, + 
n_layers=3, dropout_rate=0.1, odim=2, + padding=hparams['ffn_padding'], kernel_size=hparams['predictor_kernel']) + + # build attention layer + self.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS + self.embed_positions = SinusoidalPositionalEmbedding( + self.hidden_size, self.padding_idx, + init_size=self.max_source_positions + self.padding_idx + 1, + ) + + # build post flow + cond_hs = 80 + if hparams.get('use_txt_cond', True): + cond_hs = cond_hs + hparams['hidden_size'] + + cond_hs = cond_hs + hparams['hidden_size'] * 3 # for emo, spk embedding and prosody embedding + self.post_flow = Glow( + 80, hparams['post_glow_hidden'], hparams['post_glow_kernel_size'], 1, + hparams['post_glow_n_blocks'], hparams['post_glow_n_block_layers'], + n_split=4, n_sqz=2, + gin_channels=cond_hs, + share_cond_layers=hparams['post_share_cond_layers'], + share_wn_layers=hparams['share_wn_layers'], + sigmoid_scale=hparams['sigmoid_scale'] + ) + self.prior_dist = dist.Normal(0, 1) + + + def forward(self, txt_tokens, mel2ph=None, ref_mel2ph=None, ref_mel2word=None, spk_embed=None, emo_embed=None, ref_mels=None, + f0=None, uv=None, skip_decoder=False, global_steps=0, infer=False, **kwargs): + ret = {} + encoder_out = self.encoder(txt_tokens) # [B, T, C] + src_nonpadding = (txt_tokens > 0).float()[:, :, None] + + # add spk/emo embed + spk_embed = self.spk_embed_proj(spk_embed)[:, None, :] + emo_embed = self.emo_embed_proj(emo_embed)[:, None, :] + + + # add dur + dur_inp = (encoder_out + spk_embed + emo_embed) * src_nonpadding + mel2ph = self.add_dur(dur_inp, mel2ph, txt_tokens, ret) + tgt_nonpadding = (mel2ph > 0).float()[:, :, None] + decoder_inp = self.expand_states(encoder_out, mel2ph) + decoder_inp = self.norm(decoder_inp, spk_embed + emo_embed) + + # add prosody VQ + ret['ref_mel2ph'] = ref_mel2ph + ret['ref_mel2word'] = ref_mel2word + prosody_utter_mel = self.get_prosody_utter(decoder_inp, ref_mels, ret, infer, global_steps) + prosody_ph_mel = self.get_prosody_ph(decoder_inp, ref_mels, ret, infer, global_steps) + prosody_word_mel = self.get_prosody_word(decoder_inp, ref_mels, ret, infer, global_steps) + + # add pitch embed + pitch_inp_domain_agnostic = decoder_inp * tgt_nonpadding + pitch_inp_domain_specific = (decoder_inp + spk_embed + emo_embed + prosody_utter_mel + prosody_ph_mel + prosody_word_mel) * tgt_nonpadding + predicted_pitch = self.inpaint_pitch(pitch_inp_domain_agnostic, pitch_inp_domain_specific, f0, uv, mel2ph, ret) + + # decode + decoder_inp = decoder_inp + spk_embed + emo_embed + predicted_pitch + prosody_utter_mel + prosody_ph_mel + prosody_word_mel + ret['decoder_inp'] = decoder_inp = decoder_inp * tgt_nonpadding + if skip_decoder: + return ret + ret['mel_out'] = self.run_decoder(decoder_inp, tgt_nonpadding, ret, infer=infer, **kwargs) + + # postflow + is_training = self.training + ret['x_mask'] = tgt_nonpadding + ret['spk_embed'] = spk_embed + ret['emo_embed'] = emo_embed + ret['ref_prosody'] = prosody_utter_mel + prosody_ph_mel + prosody_word_mel + self.run_post_glow(ref_mels, infer, is_training, ret) + return ret + + def get_prosody_ph(self, encoder_out, ref_mels, ret, infer=False, global_steps=0): + # get VQ prosody + if global_steps > hparams['vq_start'] or infer: + prosody_embedding, loss, ppl = self.prosody_extractor_ph(ref_mels, ret['ref_mel2ph'], no_vq=False) + ret['vq_loss_ph'] = loss + ret['ppl_ph'] = ppl + else: + prosody_embedding = self.prosody_extractor_ph(ref_mels, ret['ref_mel2ph'], no_vq=True) + + # add positional embedding + positions = 
self.embed_positions(prosody_embedding[:, :, 0]) + prosody_embedding = self.l1_ph(torch.cat([prosody_embedding, positions], dim=-1)) + + + # style-to-content attention + src_key_padding_mask = encoder_out[:, :, 0].eq(self.padding_idx).data + prosody_key_padding_mask = prosody_embedding[:, :, 0].eq(self.padding_idx).data + if global_steps < hparams['forcing']: + output, guided_loss, attn_emo = self.align_ph(encoder_out.transpose(0, 1), prosody_embedding.transpose(0, 1), + src_key_padding_mask, prosody_key_padding_mask, forcing=True) + else: + output, guided_loss, attn_emo = self.align_ph(encoder_out.transpose(0, 1), prosody_embedding.transpose(0, 1), + src_key_padding_mask, prosody_key_padding_mask, forcing=False) + + ret['gloss_ph'] = guided_loss + ret['attn_ph'] = attn_emo + return output.transpose(0, 1) + + def get_prosody_word(self, encoder_out, ref_mels, ret, infer=False, global_steps=0): + # get VQ prosody + if global_steps > hparams['vq_start'] or infer: + prosody_embedding, loss, ppl = self.prosody_extractor_word(ref_mels, ret['ref_mel2word'], no_vq=False) + ret['vq_loss_word'] = loss + ret['ppl_word'] = ppl + else: + prosody_embedding = self.prosody_extractor_word(ref_mels, ret['ref_mel2word'], no_vq=True) + + # add positional embedding + positions = self.embed_positions(prosody_embedding[:, :, 0]) + prosody_embedding = self.l1_word(torch.cat([prosody_embedding, positions], dim=-1)) + + + # style-to-content attention + src_key_padding_mask = encoder_out[:, :, 0].eq(self.padding_idx).data + prosody_key_padding_mask = prosody_embedding[:, :, 0].eq(self.padding_idx).data + if global_steps < hparams['forcing']: + output, guided_loss, attn_emo = self.align_word(encoder_out.transpose(0, 1), prosody_embedding.transpose(0, 1), + src_key_padding_mask, prosody_key_padding_mask, forcing=True) + else: + output, guided_loss, attn_emo = self.align_word(encoder_out.transpose(0, 1), prosody_embedding.transpose(0, 1), + src_key_padding_mask, prosody_key_padding_mask, forcing=False) + ret['gloss_word'] = guided_loss + ret['attn_word'] = attn_emo + return output.transpose(0, 1) + + def get_prosody_utter(self, encoder_out, ref_mels, ret, infer=False, global_steps=0): + # get VQ prosody + if global_steps > hparams['vq_start'] or infer: + prosody_embedding, loss, ppl = self.prosody_extractor_utter(ref_mels, no_vq=False) + ret['vq_loss_utter'] = loss + ret['ppl_utter'] = ppl + else: + prosody_embedding = self.prosody_extractor_utter(ref_mels, no_vq=True) + + # add positional embedding + positions = self.embed_positions(prosody_embedding[:, :, 0]) + prosody_embedding = self.l1_utter(torch.cat([prosody_embedding, positions], dim=-1)) + + + # style-to-content attention + src_key_padding_mask = encoder_out[:, :, 0].eq(self.padding_idx).data + prosody_key_padding_mask = prosody_embedding[:, :, 0].eq(self.padding_idx).data + if global_steps < hparams['forcing']: + output, guided_loss, attn_emo = self.align_utter(encoder_out.transpose(0, 1), prosody_embedding.transpose(0, 1), + src_key_padding_mask, prosody_key_padding_mask, forcing=True) + else: + output, guided_loss, attn_emo = self.align_utter(encoder_out.transpose(0, 1), prosody_embedding.transpose(0, 1), + src_key_padding_mask, prosody_key_padding_mask, forcing=False) + ret['gloss_utter'] = guided_loss + ret['attn_utter'] = attn_emo + return output.transpose(0, 1) + + + + def inpaint_pitch(self, pitch_inp_domain_agnostic, pitch_inp_domain_specific, f0, uv, mel2ph, ret): + if hparams['pitch_type'] == 'frame': + pitch_padding = mel2ph == 0 + if 
hparams['predictor_grad'] != 1: + pitch_inp_domain_agnostic = pitch_inp_domain_agnostic.detach() + hparams['predictor_grad'] * (pitch_inp_domain_agnostic - pitch_inp_domain_agnostic.detach()) + pitch_inp_domain_specific = pitch_inp_domain_specific.detach() + hparams['predictor_grad'] * (pitch_inp_domain_specific - pitch_inp_domain_specific.detach()) + + pitch_domain_agnostic = self.pitch_predictor(pitch_inp_domain_agnostic) + pitch_domain_specific = self.pitch_inpainter_predictor(pitch_inp_domain_specific) + pitch_pred = pitch_domain_agnostic + pitch_domain_specific + ret['pitch_pred'] = pitch_pred + + use_uv = hparams['pitch_type'] == 'frame' and hparams['use_uv'] + if f0 is None: + f0 = pitch_pred[:, :, 0] # [B, T] + if use_uv: + uv = pitch_pred[:, :, 1] > 0 # [B, T] + f0_denorm = denorm_f0(f0, uv if use_uv else None, hparams, pitch_padding=pitch_padding) + pitch = f0_to_coarse(f0_denorm) # start from 0 [B, T_txt] + ret['f0_denorm'] = f0_denorm + ret['f0_denorm_pred'] = denorm_f0(pitch_pred[:, :, 0], (pitch_pred[:, :, 1] > 0) if use_uv else None, hparams, pitch_padding=pitch_padding) + if hparams['pitch_type'] == 'ph': + pitch = torch.gather(F.pad(pitch, [1, 0]), 1, mel2ph) + ret['f0_denorm'] = torch.gather(F.pad(ret['f0_denorm'], [1, 0]), 1, mel2ph) + ret['f0_denorm_pred'] = torch.gather(F.pad(ret['f0_denorm_pred'], [1, 0]), 1, mel2ph) + pitch_embed = self.pitch_embed(pitch) + return pitch_embed + + def run_post_glow(self, tgt_mels, infer, is_training, ret): + x_recon = ret['mel_out'].transpose(1, 2) + g = x_recon + B, _, T = g.shape + if hparams.get('use_txt_cond', True): + g = torch.cat([g, ret['decoder_inp'].transpose(1, 2)], 1) + g_spk_embed = ret['spk_embed'].repeat(1, T, 1).transpose(1, 2) + g_emo_embed = ret['emo_embed'].repeat(1, T, 1).transpose(1, 2) + l_ref_prosody = ret['ref_prosody'].transpose(1, 2) + g = torch.cat([g, g_spk_embed, g_emo_embed, l_ref_prosody], dim=1) + prior_dist = self.prior_dist + if not infer: + if is_training: + self.train() + x_mask = ret['x_mask'].transpose(1, 2) + y_lengths = x_mask.sum(-1) + g = g.detach() + tgt_mels = tgt_mels.transpose(1, 2) + z_postflow, ldj = self.post_flow(tgt_mels, x_mask, g=g) + ldj = ldj / y_lengths / 80 + ret['z_pf'], ret['ldj_pf'] = z_postflow, ldj + ret['postflow'] = -prior_dist.log_prob(z_postflow).mean() - ldj.mean() + else: + x_mask = torch.ones_like(x_recon[:, :1, :]) + z_post = prior_dist.sample(x_recon.shape).to(g.device) * hparams['noise_scale'] + x_recon_, _ = self.post_flow(z_post, x_mask, g, reverse=True) + x_recon = x_recon_ + ret['mel_out'] = x_recon.transpose(1, 2) \ No newline at end of file diff --git a/NeuralSeq/modules/GenerSpeech/model/glow_modules.py b/NeuralSeq/modules/GenerSpeech/model/glow_modules.py new file mode 100644 index 0000000000000000000000000000000000000000..62d0d5a2884178806275d17c486e62a636d1732a --- /dev/null +++ b/NeuralSeq/modules/GenerSpeech/model/glow_modules.py @@ -0,0 +1,767 @@ +import scipy +from torch.nn import functional as F +import torch +from torch import nn +import numpy as np +from modules.commons.common_layers import Permute +from modules.fastspeech.tts_modules import FFTBlocks +from modules.GenerSpeech.model.wavenet import fused_add_tanh_sigmoid_multiply, WN + + +class LayerNorm(nn.Module): + def __init__(self, channels, eps=1e-4): + super().__init__() + self.channels = channels + self.eps = eps + + self.gamma = nn.Parameter(torch.ones(channels)) + self.beta = nn.Parameter(torch.zeros(channels)) + + def forward(self, x): + n_dims = len(x.shape) + mean = torch.mean(x, 1, 
keepdim=True) + variance = torch.mean((x - mean) ** 2, 1, keepdim=True) + + x = (x - mean) * torch.rsqrt(variance + self.eps) + + shape = [1, -1] + [1] * (n_dims - 2) + x = x * self.gamma.view(*shape) + self.beta.view(*shape) + return x + + +class ConvReluNorm(nn.Module): + def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout): + super().__init__() + self.in_channels = in_channels + self.hidden_channels = hidden_channels + self.out_channels = out_channels + self.kernel_size = kernel_size + self.n_layers = n_layers + self.p_dropout = p_dropout + assert n_layers > 1, "Number of layers should be larger than 0." + + self.conv_layers = nn.ModuleList() + self.norm_layers = nn.ModuleList() + self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size // 2)) + self.norm_layers.append(LayerNorm(hidden_channels)) + self.relu_drop = nn.Sequential( + nn.ReLU(), + nn.Dropout(p_dropout)) + for _ in range(n_layers - 1): + self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size // 2)) + self.norm_layers.append(LayerNorm(hidden_channels)) + self.proj = nn.Conv1d(hidden_channels, out_channels, 1) + self.proj.weight.data.zero_() + self.proj.bias.data.zero_() + + def forward(self, x, x_mask): + x_org = x + for i in range(self.n_layers): + x = self.conv_layers[i](x * x_mask) + x = self.norm_layers[i](x) + x = self.relu_drop(x) + x = x_org + self.proj(x) + return x * x_mask + + + +class ActNorm(nn.Module): # glow中的线性变换层 + def __init__(self, channels, ddi=False, **kwargs): + super().__init__() + self.channels = channels + self.initialized = not ddi + + self.logs = nn.Parameter(torch.zeros(1, channels, 1)) + self.bias = nn.Parameter(torch.zeros(1, channels, 1)) + + def forward(self, x, x_mask=None, reverse=False, **kwargs): + if x_mask is None: + x_mask = torch.ones(x.size(0), 1, x.size(2)).to(device=x.device, dtype=x.dtype) + x_len = torch.sum(x_mask, [1, 2]) + if not self.initialized: + self.initialize(x, x_mask) + self.initialized = True + + if reverse: + z = (x - self.bias) * torch.exp(-self.logs) * x_mask + logdet = torch.sum(-self.logs) * x_len + else: + z = (self.bias + torch.exp(self.logs) * x) * x_mask + logdet = torch.sum(self.logs) * x_len # [b] + return z, logdet + + def store_inverse(self): + pass + + def set_ddi(self, ddi): + self.initialized = not ddi + + def initialize(self, x, x_mask): + with torch.no_grad(): + denom = torch.sum(x_mask, [0, 2]) + m = torch.sum(x * x_mask, [0, 2]) / denom + m_sq = torch.sum(x * x * x_mask, [0, 2]) / denom + v = m_sq - (m ** 2) + logs = 0.5 * torch.log(torch.clamp_min(v, 1e-6)) + + bias_init = (-m * torch.exp(-logs)).view(*self.bias.shape).to(dtype=self.bias.dtype) + logs_init = (-logs).view(*self.logs.shape).to(dtype=self.logs.dtype) + + self.bias.data.copy_(bias_init) + self.logs.data.copy_(logs_init) + + +class InvConvNear(nn.Module): # 可逆卷积 + def __init__(self, channels, n_split=4, no_jacobian=False, lu=True, n_sqz=2, **kwargs): + super().__init__() + assert (n_split % 2 == 0) + self.channels = channels + self.n_split = n_split + self.n_sqz = n_sqz + self.no_jacobian = no_jacobian + + w_init = torch.qr(torch.FloatTensor(self.n_split, self.n_split).normal_())[0] + if torch.det(w_init) < 0: + w_init[:, 0] = -1 * w_init[:, 0] + self.lu = lu + if lu: + # LU decomposition can slightly speed up the inverse + np_p, np_l, np_u = scipy.linalg.lu(w_init) + np_s = np.diag(np_u) + np_sign_s = np.sign(np_s) + np_log_s = np.log(np.abs(np_s)) + np_u = 
np.triu(np_u, k=1) + l_mask = np.tril(np.ones(w_init.shape, dtype=float), -1) + eye = np.eye(*w_init.shape, dtype=float) + + self.register_buffer('p', torch.Tensor(np_p.astype(float))) + self.register_buffer('sign_s', torch.Tensor(np_sign_s.astype(float))) + self.l = nn.Parameter(torch.Tensor(np_l.astype(float)), requires_grad=True) + self.log_s = nn.Parameter(torch.Tensor(np_log_s.astype(float)), requires_grad=True) + self.u = nn.Parameter(torch.Tensor(np_u.astype(float)), requires_grad=True) + self.register_buffer('l_mask', torch.Tensor(l_mask)) + self.register_buffer('eye', torch.Tensor(eye)) + else: + self.weight = nn.Parameter(w_init) + + def forward(self, x, x_mask=None, reverse=False, **kwargs): + b, c, t = x.size() + assert (c % self.n_split == 0) + if x_mask is None: + x_mask = 1 + x_len = torch.ones((b,), dtype=x.dtype, device=x.device) * t + else: + x_len = torch.sum(x_mask, [1, 2]) + + x = x.view(b, self.n_sqz, c // self.n_split, self.n_split // self.n_sqz, t) + x = x.permute(0, 1, 3, 2, 4).contiguous().view(b, self.n_split, c // self.n_split, t) + + if self.lu: + self.weight, log_s = self._get_weight() + logdet = log_s.sum() + logdet = logdet * (c / self.n_split) * x_len + else: + logdet = torch.logdet(self.weight) * (c / self.n_split) * x_len # [b] + + if reverse: + if hasattr(self, "weight_inv"): + weight = self.weight_inv + else: + weight = torch.inverse(self.weight.float()).to(dtype=self.weight.dtype) + logdet = -logdet + else: + weight = self.weight + if self.no_jacobian: + logdet = 0 + + weight = weight.view(self.n_split, self.n_split, 1, 1) + z = F.conv2d(x, weight) + + z = z.view(b, self.n_sqz, self.n_split // self.n_sqz, c // self.n_split, t) + z = z.permute(0, 1, 3, 2, 4).contiguous().view(b, c, t) * x_mask + return z, logdet + + def _get_weight(self): + l, log_s, u = self.l, self.log_s, self.u + l = l * self.l_mask + self.eye + u = u * self.l_mask.transpose(0, 1).contiguous() + torch.diag(self.sign_s * torch.exp(log_s)) + weight = torch.matmul(self.p, torch.matmul(l, u)) + return weight, log_s + + def store_inverse(self): + weight, _ = self._get_weight() + self.weight_inv = torch.inverse(weight.float()).to(next(self.parameters()).device) + + +class InvConv(nn.Module): + def __init__(self, channels, no_jacobian=False, lu=True, **kwargs): + super().__init__() + w_shape = [channels, channels] + w_init = np.linalg.qr(np.random.randn(*w_shape))[0].astype(float) + LU_decomposed = lu + if not LU_decomposed: + # Sample a random orthogonal matrix: + self.register_parameter("weight", nn.Parameter(torch.Tensor(w_init))) + else: + np_p, np_l, np_u = scipy.linalg.lu(w_init) + np_s = np.diag(np_u) + np_sign_s = np.sign(np_s) + np_log_s = np.log(np.abs(np_s)) + np_u = np.triu(np_u, k=1) + l_mask = np.tril(np.ones(w_shape, dtype=float), -1) + eye = np.eye(*w_shape, dtype=float) + + self.register_buffer('p', torch.Tensor(np_p.astype(float))) + self.register_buffer('sign_s', torch.Tensor(np_sign_s.astype(float))) + self.l = nn.Parameter(torch.Tensor(np_l.astype(float))) + self.log_s = nn.Parameter(torch.Tensor(np_log_s.astype(float))) + self.u = nn.Parameter(torch.Tensor(np_u.astype(float))) + self.l_mask = torch.Tensor(l_mask) + self.eye = torch.Tensor(eye) + self.w_shape = w_shape + self.LU = LU_decomposed + self.weight = None + + def get_weight(self, device, reverse): + w_shape = self.w_shape + self.p = self.p.to(device) + self.sign_s = self.sign_s.to(device) + self.l_mask = self.l_mask.to(device) + self.eye = self.eye.to(device) + l = self.l * self.l_mask + self.eye + u = self.u 
* self.l_mask.transpose(0, 1).contiguous() + torch.diag(self.sign_s * torch.exp(self.log_s)) + dlogdet = self.log_s.sum() + if not reverse: + w = torch.matmul(self.p, torch.matmul(l, u)) + else: + l = torch.inverse(l.double()).float() + u = torch.inverse(u.double()).float() + w = torch.matmul(u, torch.matmul(l, self.p.inverse())) + return w.view(w_shape[0], w_shape[1], 1), dlogdet + + def forward(self, x, x_mask=None, reverse=False, **kwargs): + """ + log-det = log|abs(|W|)| * pixels + """ + b, c, t = x.size() + if x_mask is None: + x_len = torch.ones((b,), dtype=x.dtype, device=x.device) * t + else: + x_len = torch.sum(x_mask, [1, 2]) + logdet = 0 + if not reverse: + weight, dlogdet = self.get_weight(x.device, reverse) + z = F.conv1d(x, weight) + if logdet is not None: + logdet = logdet + dlogdet * x_len + return z, logdet + else: + if self.weight is None: + weight, dlogdet = self.get_weight(x.device, reverse) + else: + weight, dlogdet = self.weight, self.dlogdet + z = F.conv1d(x, weight) + if logdet is not None: + logdet = logdet - dlogdet * x_len + return z, logdet + + def store_inverse(self): + self.weight, self.dlogdet = self.get_weight('cuda', reverse=True) + + +class Flip(nn.Module): + def forward(self, x, *args, reverse=False, **kwargs): + x = torch.flip(x, [1]) + logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device) + return x, logdet + + def store_inverse(self): + pass + + +class CouplingBlock(nn.Module): # 仿射耦合层 + def __init__(self, in_channels, hidden_channels, kernel_size, dilation_rate, n_layers, + gin_channels=0, p_dropout=0, sigmoid_scale=False, + share_cond_layers=False, wn=None): + super().__init__() + self.in_channels = in_channels + self.hidden_channels = hidden_channels + self.kernel_size = kernel_size + self.dilation_rate = dilation_rate + self.n_layers = n_layers + self.gin_channels = gin_channels + self.p_dropout = p_dropout + self.sigmoid_scale = sigmoid_scale + + start = torch.nn.Conv1d(in_channels // 2, hidden_channels, 1) + start = torch.nn.utils.weight_norm(start) + self.start = start + # Initializing last layer to 0 makes the affine coupling layers + # do nothing at first. 
This helps with training stability + end = torch.nn.Conv1d(hidden_channels, in_channels, 1) + end.weight.data.zero_() + end.bias.data.zero_() + self.end = end + self.wn = WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels, + p_dropout, share_cond_layers) + if wn is not None: + self.wn.in_layers = wn.in_layers + self.wn.res_skip_layers = wn.res_skip_layers + + def forward(self, x, x_mask=None, reverse=False, g=None, **kwargs): + if x_mask is None: + x_mask = 1 + x_0, x_1 = x[:, :self.in_channels // 2], x[:, self.in_channels // 2:] + + x = self.start(x_0) * x_mask + x = self.wn(x, x_mask, g) + out = self.end(x) + + z_0 = x_0 + m = out[:, :self.in_channels // 2, :] + logs = out[:, self.in_channels // 2:, :] + if self.sigmoid_scale: + logs = torch.log(1e-6 + torch.sigmoid(logs + 2)) + if reverse: + z_1 = (x_1 - m) * torch.exp(-logs) * x_mask + logdet = torch.sum(-logs * x_mask, [1, 2]) + else: + z_1 = (m + torch.exp(logs) * x_1) * x_mask + logdet = torch.sum(logs * x_mask, [1, 2]) + z = torch.cat([z_0, z_1], 1) + return z, logdet + + def store_inverse(self): + self.wn.remove_weight_norm() + + +class GlowFFTBlocks(FFTBlocks): + def __init__(self, hidden_size=128, gin_channels=256, num_layers=2, ffn_kernel_size=5, + dropout=None, num_heads=4, use_pos_embed=True, use_last_norm=True, + norm='ln', use_pos_embed_alpha=True): + super().__init__(hidden_size, num_layers, ffn_kernel_size, dropout, num_heads, use_pos_embed, + use_last_norm, norm, use_pos_embed_alpha) + self.inp_proj = nn.Conv1d(hidden_size + gin_channels, hidden_size, 1) + + def forward(self, x, x_mask=None, g=None): + """ + :param x: [B, C_x, T] + :param x_mask: [B, 1, T] + :param g: [B, C_g, T] + :return: [B, C_x, T] + """ + if g is not None: + x = self.inp_proj(torch.cat([x, g], 1)) + x = x.transpose(1, 2) + x = super(GlowFFTBlocks, self).forward(x, x_mask[:, 0] == 0) + x = x.transpose(1, 2) + return x + + +class TransformerCouplingBlock(nn.Module): + def __init__(self, in_channels, hidden_channels, n_layers, + gin_channels=0, p_dropout=0, sigmoid_scale=False): + super().__init__() + self.in_channels = in_channels + self.hidden_channels = hidden_channels + self.n_layers = n_layers + self.gin_channels = gin_channels + self.p_dropout = p_dropout + self.sigmoid_scale = sigmoid_scale + + start = torch.nn.Conv1d(in_channels // 2, hidden_channels, 1) + self.start = start + # Initializing last layer to 0 makes the affine coupling layers + # do nothing at first. 
This helps with training stability + end = torch.nn.Conv1d(hidden_channels, in_channels, 1) + end.weight.data.zero_() + end.bias.data.zero_() + self.end = end + self.fft_blocks = GlowFFTBlocks( + hidden_size=hidden_channels, + ffn_kernel_size=3, + gin_channels=gin_channels, + num_layers=n_layers) + + def forward(self, x, x_mask=None, reverse=False, g=None, **kwargs): + if x_mask is None: + x_mask = 1 + x_0, x_1 = x[:, :self.in_channels // 2], x[:, self.in_channels // 2:] + + x = self.start(x_0) * x_mask + x = self.fft_blocks(x, x_mask, g) + out = self.end(x) + + z_0 = x_0 + m = out[:, :self.in_channels // 2, :] + logs = out[:, self.in_channels // 2:, :] + if self.sigmoid_scale: + logs = torch.log(1e-6 + torch.sigmoid(logs + 2)) + if reverse: + z_1 = (x_1 - m) * torch.exp(-logs) * x_mask + logdet = torch.sum(-logs * x_mask, [1, 2]) + else: + z_1 = (m + torch.exp(logs) * x_1) * x_mask + logdet = torch.sum(logs * x_mask, [1, 2]) + z = torch.cat([z_0, z_1], 1) + return z, logdet + + def store_inverse(self): + pass + + +class FreqFFTCouplingBlock(nn.Module): + def __init__(self, in_channels, hidden_channels, n_layers, + gin_channels=0, p_dropout=0, sigmoid_scale=False): + super().__init__() + self.in_channels = in_channels + self.hidden_channels = hidden_channels + self.n_layers = n_layers + self.gin_channels = gin_channels + self.p_dropout = p_dropout + self.sigmoid_scale = sigmoid_scale + + hs = hidden_channels + stride = 8 + self.start = torch.nn.Conv2d(3, hs, kernel_size=stride * 2, + stride=stride, padding=stride // 2) + end = nn.ConvTranspose2d(hs, 2, kernel_size=stride, stride=stride) + end.weight.data.zero_() + end.bias.data.zero_() + self.end = nn.Sequential( + nn.Conv2d(hs * 3, hs, 3, 1, 1), + nn.ReLU(), + nn.GroupNorm(4, hs), + nn.Conv2d(hs, hs, 3, 1, 1), + end + ) + self.fft_v = FFTBlocks(hidden_size=hs, ffn_kernel_size=1, num_layers=n_layers) + self.fft_h = nn.Sequential( + nn.Conv1d(hs, hs, 3, 1, 1), + nn.ReLU(), + nn.Conv1d(hs, hs, 3, 1, 1), + ) + self.fft_g = nn.Sequential( + nn.Conv1d( + gin_channels - 160, hs, kernel_size=stride * 2, stride=stride, padding=stride // 2), + Permute(0, 2, 1), + FFTBlocks(hidden_size=hs, ffn_kernel_size=1, num_layers=n_layers), + Permute(0, 2, 1), + ) + + def forward(self, x, x_mask=None, reverse=False, g=None, **kwargs): + g_, _ = unsqueeze(g) + g_mel = g_[:, :80] + g_txt = g_[:, 80:] + g_mel, _ = squeeze(g_mel) + g_txt, _ = squeeze(g_txt) # [B, C, T] + + if x_mask is None: + x_mask = 1 + x_0, x_1 = x[:, :self.in_channels // 2], x[:, self.in_channels // 2:] + x = torch.stack([x_0, g_mel[:, :80], g_mel[:, 80:]], 1) + x = self.start(x) # [B, C, N_bins, T] + B, C, N_bins, T = x.shape + + x_v = self.fft_v(x.permute(0, 3, 2, 1).reshape(B * T, N_bins, C)) + x_v = x_v.reshape(B, T, N_bins, -1).permute(0, 3, 2, 1) + # x_v = x + + x_h = self.fft_h(x.permute(0, 2, 1, 3).reshape(B * N_bins, C, T)) + x_h = x_h.reshape(B, N_bins, -1, T).permute(0, 2, 1, 3) + # x_h = x + + x_g = self.fft_g(g_txt)[:, :, None, :].repeat(1, 1, 10, 1) + x = torch.cat([x_v, x_h, x_g], 1) + out = self.end(x) + + z_0 = x_0 + m = out[:, 0] + logs = out[:, 1] + if self.sigmoid_scale: + logs = torch.log(1e-6 + torch.sigmoid(logs + 2)) + if reverse: + z_1 = (x_1 - m) * torch.exp(-logs) * x_mask + logdet = torch.sum(-logs * x_mask, [1, 2]) + else: + z_1 = (m + torch.exp(logs) * x_1) * x_mask + logdet = torch.sum(logs * x_mask, [1, 2]) + z = torch.cat([z_0, z_1], 1) + return z, logdet + + def store_inverse(self): + pass + + +class Glow(nn.Module): + def __init__(self, + in_channels, + 
hidden_channels, + kernel_size, + dilation_rate, + n_blocks, + n_layers, + p_dropout=0., + n_split=4, + n_sqz=2, + sigmoid_scale=False, + gin_channels=0, + inv_conv_type='near', + share_cond_layers=False, + share_wn_layers=0, + ): + super().__init__() + + self.in_channels = in_channels + self.hidden_channels = hidden_channels + self.kernel_size = kernel_size + self.dilation_rate = dilation_rate + self.n_blocks = n_blocks + self.n_layers = n_layers + self.p_dropout = p_dropout + self.n_split = n_split + self.n_sqz = n_sqz + self.sigmoid_scale = sigmoid_scale + self.gin_channels = gin_channels + self.share_cond_layers = share_cond_layers + if gin_channels != 0 and share_cond_layers: + cond_layer = torch.nn.Conv1d(gin_channels * n_sqz, 2 * hidden_channels * n_layers, 1) + self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight') + wn = None + self.flows = nn.ModuleList() + for b in range(n_blocks): + self.flows.append(ActNorm(channels=in_channels * n_sqz)) + if inv_conv_type == 'near': + self.flows.append(InvConvNear(channels=in_channels * n_sqz, n_split=n_split, n_sqz=n_sqz)) + if inv_conv_type == 'invconv': + self.flows.append(InvConv(channels=in_channels * n_sqz)) + if share_wn_layers > 0: + if b % share_wn_layers == 0: + wn = WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels * n_sqz, + p_dropout, share_cond_layers) + self.flows.append( + CouplingBlock( + in_channels * n_sqz, + hidden_channels, + kernel_size=kernel_size, + dilation_rate=dilation_rate, + n_layers=n_layers, + gin_channels=gin_channels * n_sqz, + p_dropout=p_dropout, + sigmoid_scale=sigmoid_scale, + share_cond_layers=share_cond_layers, + wn=wn + )) + + def forward(self, x, x_mask=None, g=None, reverse=False, return_hiddens=False): + logdet_tot = 0 + if not reverse: + flows = self.flows + else: + flows = reversed(self.flows) + if return_hiddens: + hs = [] + if self.n_sqz > 1: + x, x_mask_ = squeeze(x, x_mask, self.n_sqz) + if g is not None: + g, _ = squeeze(g, x_mask, self.n_sqz) + x_mask = x_mask_ + if self.share_cond_layers and g is not None: + g = self.cond_layer(g) + for f in flows: + x, logdet = f(x, x_mask, g=g, reverse=reverse) + if return_hiddens: + hs.append(x) + logdet_tot += logdet + if self.n_sqz > 1: + x, x_mask = unsqueeze(x, x_mask, self.n_sqz) + if return_hiddens: + return x, logdet_tot, hs + return x, logdet_tot + + def store_inverse(self): + def remove_weight_norm(m): + try: + nn.utils.remove_weight_norm(m) + except ValueError: # this module didn't have weight norm + return + + self.apply(remove_weight_norm) + for f in self.flows: + f.store_inverse() + + +class GlowV2(nn.Module): + def __init__(self, + in_channels=256, + hidden_channels=256, + kernel_size=3, + dilation_rate=1, + n_blocks=8, + n_layers=4, + p_dropout=0., + n_split=4, + n_split_blocks=3, + sigmoid_scale=False, + gin_channels=0, + share_cond_layers=True): + super().__init__() + + self.in_channels = in_channels + self.hidden_channels = hidden_channels + self.kernel_size = kernel_size + self.dilation_rate = dilation_rate + self.n_blocks = n_blocks + self.n_layers = n_layers + self.p_dropout = p_dropout + self.n_split = n_split + self.n_split_blocks = n_split_blocks + self.sigmoid_scale = sigmoid_scale + self.gin_channels = gin_channels + + self.cond_layers = nn.ModuleList() + self.share_cond_layers = share_cond_layers + + self.flows = nn.ModuleList() + in_channels = in_channels * 2 + for l in range(n_split_blocks): + blocks = nn.ModuleList() + self.flows.append(blocks) + gin_channels = gin_channels * 2 + if 
gin_channels != 0 and share_cond_layers: + cond_layer = torch.nn.Conv1d(gin_channels, 2 * hidden_channels * n_layers, 1) + self.cond_layers.append(torch.nn.utils.weight_norm(cond_layer, name='weight')) + for b in range(n_blocks): + blocks.append(ActNorm(channels=in_channels)) + blocks.append(InvConvNear(channels=in_channels, n_split=n_split)) + blocks.append(CouplingBlock( + in_channels, + hidden_channels, + kernel_size=kernel_size, + dilation_rate=dilation_rate, + n_layers=n_layers, + gin_channels=gin_channels, + p_dropout=p_dropout, + sigmoid_scale=sigmoid_scale, + share_cond_layers=share_cond_layers)) + + def forward(self, x=None, x_mask=None, g=None, reverse=False, concat_zs=True, + noise_scale=0.66, return_hiddens=False): + logdet_tot = 0 + if not reverse: + flows = self.flows + assert x_mask is not None + zs = [] + if return_hiddens: + hs = [] + for i, blocks in enumerate(flows): + x, x_mask = squeeze(x, x_mask) + g_ = None + if g is not None: + g, _ = squeeze(g) + if self.share_cond_layers: + g_ = self.cond_layers[i](g) + else: + g_ = g + for layer in blocks: + x, logdet = layer(x, x_mask=x_mask, g=g_, reverse=reverse) + if return_hiddens: + hs.append(x) + logdet_tot += logdet + if i == self.n_split_blocks - 1: + zs.append(x) + else: + x, z = torch.chunk(x, 2, 1) + zs.append(z) + if concat_zs: + zs = [z.reshape(x.shape[0], -1) for z in zs] + zs = torch.cat(zs, 1) # [B, C*T] + if return_hiddens: + return zs, logdet_tot, hs + return zs, logdet_tot + else: + flows = reversed(self.flows) + if x is not None: + assert isinstance(x, list) + zs = x + else: + B, _, T = g.shape + zs = self.get_prior(B, T, g.device, noise_scale) + zs_ori = zs + if g is not None: + g_, g = g, [] + for i in range(len(self.flows)): + g_, _ = squeeze(g_) + g.append(self.cond_layers[i](g_) if self.share_cond_layers else g_) + else: + g = [None for _ in range(len(self.flows))] + if x_mask is not None: + x_masks = [] + for i in range(len(self.flows)): + x_mask, _ = squeeze(x_mask) + x_masks.append(x_mask) + else: + x_masks = [None for _ in range(len(self.flows))] + x_masks = x_masks[::-1] + g = g[::-1] + zs = zs[::-1] + x = None + for i, blocks in enumerate(flows): + x = zs[i] if x is None else torch.cat([x, zs[i]], 1) + for layer in reversed(blocks): + x, logdet = layer(x, x_masks=x_masks[i], g=g[i], reverse=reverse) + logdet_tot += logdet + x, _ = unsqueeze(x) + return x, logdet_tot, zs_ori + + def store_inverse(self): + for f in self.modules(): + if hasattr(f, 'store_inverse') and f != self: + f.store_inverse() + + def remove_weight_norm(m): + try: + nn.utils.remove_weight_norm(m) + except ValueError: # this module didn't have weight norm + return + + self.apply(remove_weight_norm) + + def get_prior(self, B, T, device, noise_scale=0.66): + C = 80 + zs = [] + for i in range(len(self.flows)): + C, T = C, T // 2 + if i == self.n_split_blocks - 1: + zs.append(torch.randn(B, C * 2, T).to(device) * noise_scale) + else: + zs.append(torch.randn(B, C, T).to(device) * noise_scale) + return zs + + +def squeeze(x, x_mask=None, n_sqz=2): + b, c, t = x.size() + + t = (t // n_sqz) * n_sqz + x = x[:, :, :t] + x_sqz = x.view(b, c, t // n_sqz, n_sqz) + x_sqz = x_sqz.permute(0, 3, 1, 2).contiguous().view(b, c * n_sqz, t // n_sqz) + + if x_mask is not None: + x_mask = x_mask[:, :, n_sqz - 1::n_sqz] + else: + x_mask = torch.ones(b, 1, t // n_sqz).to(device=x.device, dtype=x.dtype) + return x_sqz * x_mask, x_mask + + +def unsqueeze(x, x_mask=None, n_sqz=2): + b, c, t = x.size() + + x_unsqz = x.view(b, n_sqz, c // n_sqz, t) + x_unsqz = 
x_unsqz.permute(0, 2, 3, 1).contiguous().view(b, c // n_sqz, t * n_sqz) + + if x_mask is not None: + x_mask = x_mask.unsqueeze(-1).repeat(1, 1, 1, n_sqz).view(b, 1, t * n_sqz) + else: + x_mask = torch.ones(b, 1, t * n_sqz).to(device=x.device, dtype=x.dtype) + return x_unsqz * x_mask, x_mask diff --git a/NeuralSeq/modules/GenerSpeech/model/mixstyle.py b/NeuralSeq/modules/GenerSpeech/model/mixstyle.py new file mode 100644 index 0000000000000000000000000000000000000000..8f592118f0bb224e8d004ce8884d0ff6f5fd9fc1 --- /dev/null +++ b/NeuralSeq/modules/GenerSpeech/model/mixstyle.py @@ -0,0 +1,63 @@ +from modules.commons.common_layers import * +import random + + +class MixStyle(nn.Module): + """MixStyle. + Reference: + Zhou et al. Domain Generalization with MixStyle. ICLR 2021. + """ + + def __init__(self, p=0.5, alpha=0.1, eps=1e-6, hidden_size=256): + """ + Args: + p (float): probability of using MixStyle. + alpha (float): parameter of the Beta distribution. + eps (float): scaling parameter to avoid numerical issues. + mix (str): how to mix. + """ + super().__init__() + self.p = p + self.beta = torch.distributions.Beta(alpha, alpha) + self.eps = eps + self.alpha = alpha + self._activated = True + self.hidden_size = hidden_size + self.affine_layer = LinearNorm( + hidden_size, + 2 * hidden_size, # For both b (bias) g (gain) + ) + + def __repr__(self): + return f'MixStyle(p={self.p}, alpha={self.alpha}, eps={self.eps})' + + def set_activation_status(self, status=True): + self._activated = status + + def forward(self, x, spk_embed): + if not self.training or not self._activated: + return x + + if random.random() > self.p: + return x + + B = x.size(0) + + mu, sig = torch.mean(x, dim=-1, keepdim=True), torch.std(x, dim=-1, keepdim=True) + x_normed = (x - mu) / (sig + 1e-6) # [B, T, H_m] + + lmda = self.beta.sample((B, 1, 1)) + lmda = lmda.to(x.device) + + # Get Bias and Gain + mu1, sig1 = torch.split(self.affine_layer(spk_embed), self.hidden_size, dim=-1) # [B, 1, 2 * H_m] --> 2 * [B, 1, H_m] + + # MixStyle + perm = torch.randperm(B) + mu2, sig2 = mu1[perm], sig1[perm] + + mu_mix = mu1*lmda + mu2 * (1-lmda) + sig_mix = sig1*lmda + sig2 * (1-lmda) + + # Perform Scailing and Shifting + return sig_mix * x_normed + mu_mix # [B, T, H_m] diff --git a/NeuralSeq/modules/GenerSpeech/model/prosody_util.py b/NeuralSeq/modules/GenerSpeech/model/prosody_util.py new file mode 100644 index 0000000000000000000000000000000000000000..113c39df9d1b0144aa5a5f00505c7e08bfc6ea11 --- /dev/null +++ b/NeuralSeq/modules/GenerSpeech/model/prosody_util.py @@ -0,0 +1,385 @@ +from torch import nn +import copy +import torch +from utils.hparams import hparams +from modules.GenerSpeech.model.wavenet import WN +import math + +from modules.fastspeech.tts_modules import LayerNorm +import torch.nn.functional as F +from utils.tts_utils import group_hidden_by_segs, sequence_mask + +from scipy.cluster.vq import kmeans2 +from torch.nn import functional as F + + +class VQEmbeddingEMA(nn.Module): + def __init__(self, n_embeddings, embedding_dim, commitment_cost=0.25, decay=0.999, epsilon=1e-5, + print_vq_prob=False): + super(VQEmbeddingEMA, self).__init__() + self.commitment_cost = commitment_cost + self.n_embeddings = n_embeddings + self.decay = decay + self.epsilon = epsilon + self.print_vq_prob = print_vq_prob + self.register_buffer('data_initialized', torch.zeros(1)) + init_bound = 1 / 512 + embedding = torch.Tensor(n_embeddings, embedding_dim) + embedding.uniform_(-init_bound, init_bound) + self.register_buffer("embedding", embedding) + 
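+        # EMA bookkeeping for the codebook: ema_count tracks how often each code is
+        # selected and ema_weight accumulates the sum of vectors assigned to it, so the
+        # embedding can later be refreshed as ema_weight / ema_count during training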
self.register_buffer("ema_count", torch.zeros(n_embeddings)) + self.register_buffer("ema_weight", self.embedding.clone()) + + def encode(self, x): + B, T, _ = x.shape + M, D = self.embedding.size() + x_flat = x.detach().reshape(-1, D) + + distances = torch.addmm(torch.sum(self.embedding ** 2, dim=1) + + torch.sum(x_flat ** 2, dim=1, keepdim=True), + x_flat, self.embedding.t(), + alpha=-2.0, beta=1.0) # [B*T_mel, N_vq] + indices = torch.argmin(distances.float(), dim=-1) # [B*T_mel] + quantized = F.embedding(indices, self.embedding) + quantized = quantized.view_as(x) + return x_flat, quantized, indices + + def forward(self, x): + """ + + :param x: [B, T, D] + :return: [B, T, D] + """ + B, T, _ = x.shape + M, D = self.embedding.size() + if self.training and self.data_initialized.item() == 0: + print('| running kmeans in VQVAE') # data driven initialization for the embeddings + x_flat = x.detach().reshape(-1, D) + rp = torch.randperm(x_flat.size(0)) + kd = kmeans2(x_flat[rp].data.cpu().numpy(), self.n_embeddings, minit='points') + self.embedding.copy_(torch.from_numpy(kd[0])) + x_flat, quantized, indices = self.encode(x) + encodings = F.one_hot(indices, M).float() + self.ema_weight.copy_(torch.matmul(encodings.t(), x_flat)) + self.ema_count.copy_(torch.sum(encodings, dim=0)) + + x_flat, quantized, indices = self.encode(x) + encodings = F.one_hot(indices, M).float() + indices = indices.reshape(B, T) + + if self.training and self.data_initialized.item() != 0: + self.ema_count = self.decay * self.ema_count + (1 - self.decay) * torch.sum(encodings, dim=0) + + n = torch.sum(self.ema_count) + self.ema_count = (self.ema_count + self.epsilon) / (n + M * self.epsilon) * n + + dw = torch.matmul(encodings.t(), x_flat) + self.ema_weight = self.decay * self.ema_weight + (1 - self.decay) * dw + + self.embedding = self.ema_weight / self.ema_count.unsqueeze(-1) + self.data_initialized.fill_(1) + + e_latent_loss = F.mse_loss(x, quantized.detach(), reduction='none') + nonpadding = (x.abs().sum(-1) > 0).float() + e_latent_loss = (e_latent_loss.mean(-1) * nonpadding).sum() / nonpadding.sum() + loss = self.commitment_cost * e_latent_loss + + quantized = x + (quantized - x).detach() + + avg_probs = torch.mean(encodings, dim=0) + perplexity = torch.exp(-torch.sum(avg_probs * torch.log(avg_probs + 1e-10))) + if self.print_vq_prob: + print("| VQ code avg_probs: ", avg_probs) + return quantized, loss, indices, perplexity + +class CrossAttenLayer(nn.Module): + def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1): + super(CrossAttenLayer, self).__init__() + self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) + self.linear1 = nn.Linear(d_model, dim_feedforward) + self.dropout1 = nn.Dropout(dropout) + self.norm1 = nn.LayerNorm(d_model) + self.linear2 = nn.Linear(dim_feedforward, d_model) + self.dropout2 = nn.Dropout(dropout) + self.norm2 = nn.LayerNorm(d_model) + self.activation = nn.ReLU() + + def forward(self, src, local_emotion, emotion_key_padding_mask=None, forcing=False): + # src: (Tph, B, 256) local_emotion: (Temo, B, 256) emotion_key_padding_mask: (B, Temo) + if forcing: + maxlength = src.shape[0] + k = local_emotion.shape[0] / src.shape[0] + lengths1 = torch.ceil(torch.tensor([i for i in range(maxlength)]).to(src.device) * k) + 1 + lengths2 = torch.floor(torch.tensor([i for i in range(maxlength)]).to(src.device) * k) - 1 + mask1 = sequence_mask(lengths1, local_emotion.shape[0]) + mask2 = sequence_mask(lengths2, local_emotion.shape[0]) + mask = mask1.float() - mask2.float() 
+ attn_emo = mask.repeat(src.shape[1], 1, 1) # (B, Tph, Temo) + src2 = torch.matmul(local_emotion.permute(1, 2, 0), attn_emo.float().transpose(1, 2)).permute(2, 0, 1) + else: + src2, attn_emo = self.multihead_attn(src, local_emotion, local_emotion, key_padding_mask=emotion_key_padding_mask) + src = src + self.dropout1(src2) + src = self.norm1(src) + src2 = self.linear2(self.activation(self.linear1(src))) + src = src + self.dropout2(src2) + src = self.norm2(src) + return src, attn_emo + + +class ProsodyAligner(nn.Module): + def __init__(self, num_layers, guided_sigma=0.3, guided_layers=None, norm=None): + super(ProsodyAligner, self).__init__() + self.layers = nn.ModuleList([CrossAttenLayer(d_model=hparams['hidden_size'], nhead=2) for _ in range(num_layers)]) + self.num_layers = num_layers + self.norm = norm + self.guided_sigma = guided_sigma + self.guided_layers = guided_layers if guided_layers is not None else num_layers + + def forward(self, src, local_emotion, src_key_padding_mask=None, emotion_key_padding_mask=None, forcing=False): + output = src + guided_loss = 0 + attn_emo_list = [] + for i, mod in enumerate(self.layers): + # output: (Tph, B, 256), global_emotion: (1, B, 256), local_emotion: (Temo, B, 256) mask: None, src_key_padding_mask: (B, Tph), + # emotion_key_padding_mask: (B, Temo) + output, attn_emo = mod(output, local_emotion, emotion_key_padding_mask=emotion_key_padding_mask, forcing=forcing) + attn_emo_list.append(attn_emo.unsqueeze(1)) + # attn_emo: (B, Tph, Temo) attn: (B, Tph, Tph) + if i < self.guided_layers and src_key_padding_mask is not None: + s_length = (~src_key_padding_mask).float().sum(-1) # B + emo_length = (~emotion_key_padding_mask).float().sum(-1) + attn_w_emo = _make_guided_attention_mask(src_key_padding_mask.size(-1), s_length, emotion_key_padding_mask.size(-1), emo_length, self.guided_sigma) + + g_loss_emo = attn_emo * attn_w_emo # N, L, S + non_padding_mask = (~src_key_padding_mask).unsqueeze(-1) & (~emotion_key_padding_mask).unsqueeze(1) + guided_loss = g_loss_emo[non_padding_mask].mean() + guided_loss + + if self.norm is not None: + output = self.norm(output) + + return output, guided_loss, attn_emo_list + +def _make_guided_attention_mask(ilen, rilen, olen, rolen, sigma): + grid_x, grid_y = torch.meshgrid(torch.arange(ilen, device=rilen.device), torch.arange(olen, device=rolen.device)) + grid_x = grid_x.unsqueeze(0).expand(rilen.size(0), -1, -1) + grid_y = grid_y.unsqueeze(0).expand(rolen.size(0), -1, -1) + rilen = rilen.unsqueeze(1).unsqueeze(1) + rolen = rolen.unsqueeze(1).unsqueeze(1) + return 1.0 - torch.exp( + -((grid_y.float() / rolen - grid_x.float() / rilen) ** 2) / (2 * (sigma ** 2)) + ) + +class LocalStyleAdaptor(nn.Module): + def __init__(self, hidden_size, num_vq_codes=64, padding_idx=0): + super(LocalStyleAdaptor, self).__init__() + self.encoder = ConvBlocks(80, hidden_size, [1] * 5, 5, dropout=hparams['vae_dropout']) + self.n_embed = num_vq_codes + self.vqvae = VQEmbeddingEMA(self.n_embed, hidden_size, commitment_cost=hparams['lambda_commit']) + self.wavenet = WN(hidden_channels=80, gin_channels=80, kernel_size=3, dilation_rate=1, n_layers=4) + self.padding_idx = padding_idx + self.hidden_size = hidden_size + + def forward(self, ref_mels, mel2ph=None, no_vq=False): + """ + + :param ref_mels: [B, T, 80] + :return: [B, 1, H] + """ + padding_mask = ref_mels[:, :, 0].eq(self.padding_idx).data + ref_mels = self.wavenet(ref_mels.transpose(1, 2), x_mask=(~padding_mask).unsqueeze(1).repeat([1, 80, 1])).transpose(1, 2) + if mel2ph is not None: + 
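+            # group frame-level features into phoneme-level segments according to the
+            # mel2ph alignment before quantization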
ref_ph, _ = group_hidden_by_segs(ref_mels, mel2ph, torch.max(mel2ph)) + else: + ref_ph = ref_mels + prosody = self.encoder(ref_ph) + if no_vq: + return prosody + z, vq_loss, vq_tokens, ppl = self.vqvae(prosody) + vq_loss = vq_loss.mean() + return z, vq_loss, ppl + + + + +class LambdaLayer(nn.Module): + def __init__(self, lambd): + super(LambdaLayer, self).__init__() + self.lambd = lambd + + def forward(self, x): + return self.lambd(x) + + +class Conv1d(nn.Conv1d): + """A wrapper around nn.Conv1d, that works on (batch, time, channels)""" + + def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, dilation=1, groups=1, bias=True, padding=0): + super(Conv1d, self).__init__(in_channels=in_channels, out_channels=out_channels, + kernel_size=kernel_size, stride=stride, dilation=dilation, + groups=groups, bias=bias, padding=padding) + + def forward(self, x): + return super().forward(x.transpose(2, 1)).transpose(2, 1) + + +def init_weights_func(m): + classname = m.__class__.__name__ + if classname.find("Conv1d") != -1: + torch.nn.init.xavier_uniform_(m.weight) + + +class ResidualBlock(nn.Module): + """Implements conv->PReLU->norm n-times""" + + def __init__(self, channels, kernel_size, dilation, n=2, norm_type='bn', dropout=0.0, + c_multiple=2, ln_eps=1e-12): + super(ResidualBlock, self).__init__() + + if norm_type == 'bn': + norm_builder = lambda: nn.BatchNorm1d(channels) + elif norm_type == 'in': + norm_builder = lambda: nn.InstanceNorm1d(channels, affine=True) + elif norm_type == 'gn': + norm_builder = lambda: nn.GroupNorm(8, channels) + elif norm_type == 'ln': + norm_builder = lambda: LayerNorm(channels, dim=1, eps=ln_eps) + else: + norm_builder = lambda: nn.Identity() + + self.blocks = [ + nn.Sequential( + norm_builder(), + nn.Conv1d(channels, c_multiple * channels, kernel_size, dilation=dilation, + padding=(dilation * (kernel_size - 1)) // 2), + LambdaLayer(lambda x: x * kernel_size ** -0.5), + nn.GELU(), + nn.Conv1d(c_multiple * channels, channels, 1, dilation=dilation), + ) + for i in range(n) + ] + + self.blocks = nn.ModuleList(self.blocks) + self.dropout = dropout + + def forward(self, x): + nonpadding = (x.abs().sum(1) > 0).float()[:, None, :] + for b in self.blocks: + x_ = b(x) + if self.dropout > 0 and self.training: + x_ = F.dropout(x_, self.dropout, training=self.training) + x = x + x_ + x = x * nonpadding + return x + + +class Pad(nn.ZeroPad2d): + def __init__(self, kernel_size, dilation): + pad_total = dilation * (kernel_size - 1) + begin = pad_total // 2 + end = pad_total - begin + + super(Pad, self).__init__((begin, end, begin, end)) + + +class ZeroTemporalPad(nn.ZeroPad2d): + """Pad sequences to equal lentgh in the temporal dimension""" + + def __init__(self, kernel_size, dilation, causal=False): + total_pad = (dilation * (kernel_size - 1)) + + if causal: + super(ZeroTemporalPad, self).__init__((total_pad, 0)) + else: + begin = total_pad // 2 + end = total_pad - begin + super(ZeroTemporalPad, self).__init__((begin, end)) + + +class ConvBlocks(nn.Module): + """Decodes the expanded phoneme encoding into spectrograms""" + + def __init__(self, channels, out_dims, dilations, kernel_size, + norm_type='ln', layers_in_block=2, c_multiple=2, + dropout=0.0, ln_eps=1e-5, init_weights=True): + super(ConvBlocks, self).__init__() + self.res_blocks = nn.Sequential( + *[ResidualBlock(channels, kernel_size, d, + n=layers_in_block, norm_type=norm_type, c_multiple=c_multiple, + dropout=dropout, ln_eps=ln_eps) + for d in dilations], + ) + if norm_type == 'bn': + norm = 
nn.BatchNorm1d(channels) + elif norm_type == 'in': + norm = nn.InstanceNorm1d(channels, affine=True) + elif norm_type == 'gn': + norm = nn.GroupNorm(8, channels) + elif norm_type == 'ln': + norm = LayerNorm(channels, dim=1, eps=ln_eps) + self.last_norm = norm + self.post_net1 = nn.Conv1d(channels, out_dims, kernel_size=3, padding=1) + if init_weights: + self.apply(init_weights_func) + + def forward(self, x): + """ + + :param x: [B, T, H] + :return: [B, T, H] + """ + x = x.transpose(1, 2) + nonpadding = (x.abs().sum(1) > 0).float()[:, None, :] + x = self.res_blocks(x) * nonpadding + x = self.last_norm(x) * nonpadding + x = self.post_net1(x) * nonpadding + return x.transpose(1, 2) + + +class TextConvEncoder(ConvBlocks): + def __init__(self, embed_tokens, channels, out_dims, dilations, kernel_size, + norm_type='ln', layers_in_block=2, c_multiple=2, + dropout=0.0, ln_eps=1e-5, init_weights=True): + super().__init__(channels, out_dims, dilations, kernel_size, + norm_type, layers_in_block, c_multiple, + dropout, ln_eps, init_weights) + self.embed_tokens = embed_tokens + self.embed_scale = math.sqrt(channels) + + def forward(self, txt_tokens): + """ + + :param txt_tokens: [B, T] + :return: { + 'encoder_out': [B x T x C] + } + """ + x = self.embed_scale * self.embed_tokens(txt_tokens) + return super().forward(x) + + +class ConditionalConvBlocks(ConvBlocks): + def __init__(self, channels, g_channels, out_dims, dilations, kernel_size, + norm_type='ln', layers_in_block=2, c_multiple=2, + dropout=0.0, ln_eps=1e-5, init_weights=True, is_BTC=True): + super().__init__(channels, out_dims, dilations, kernel_size, + norm_type, layers_in_block, c_multiple, + dropout, ln_eps, init_weights) + self.g_prenet = nn.Conv1d(g_channels, channels, 3, padding=1) + self.is_BTC = is_BTC + if init_weights: + self.g_prenet.apply(init_weights_func) + + def forward(self, x, g, x_mask): + if self.is_BTC: + x = x.transpose(1, 2) + g = g.transpose(1, 2) + x_mask = x_mask.transpose(1, 2) + x = x + self.g_prenet(g) + x = x * x_mask + + if not self.is_BTC: + x = x.transpose(1, 2) + x = super(ConditionalConvBlocks, self).forward(x) # input needs to be BTC + if not self.is_BTC: + x = x.transpose(1, 2) + return x diff --git a/NeuralSeq/modules/GenerSpeech/model/wavenet.py b/NeuralSeq/modules/GenerSpeech/model/wavenet.py new file mode 100644 index 0000000000000000000000000000000000000000..481c02d9cc776eba40e578e1b2549bf352357be8 --- /dev/null +++ b/NeuralSeq/modules/GenerSpeech/model/wavenet.py @@ -0,0 +1,87 @@ +from modules.commons.common_layers import * + + +# @torch.jit.script +def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): + n_channels_int = n_channels[0] + in_act = input_a + input_b + t_act = torch.tanh(in_act[:, :n_channels_int, :]) + s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) + acts = t_act * s_act + return acts + + +class WN(torch.nn.Module): + def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, + p_dropout=0, share_cond_layers=False): + super(WN, self).__init__() + assert (kernel_size % 2 == 1) + assert (hidden_channels % 2 == 0) + self.hidden_channels = hidden_channels + self.kernel_size = kernel_size + self.dilation_rate = dilation_rate + self.n_layers = n_layers + self.gin_channels = gin_channels + self.p_dropout = p_dropout + self.share_cond_layers = share_cond_layers + + self.in_layers = torch.nn.ModuleList() + self.res_skip_layers = torch.nn.ModuleList() + self.drop = nn.Dropout(p_dropout) + + if gin_channels != 0 and not share_cond_layers: + cond_layer 
= torch.nn.Conv1d(gin_channels, 2 * hidden_channels * n_layers, 1) + self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight') + + for i in range(n_layers): + dilation = dilation_rate ** i + padding = int((kernel_size * dilation - dilation) / 2) + in_layer = torch.nn.Conv1d(hidden_channels, 2 * hidden_channels, kernel_size, + dilation=dilation, padding=padding) + in_layer = torch.nn.utils.weight_norm(in_layer, name='weight') + self.in_layers.append(in_layer) + + # last one is not necessary + if i < n_layers - 1: + res_skip_channels = 2 * hidden_channels + else: + res_skip_channels = hidden_channels + + res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1) + res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight') + self.res_skip_layers.append(res_skip_layer) + + def forward(self, x, x_mask=None, g=None, **kwargs): + output = torch.zeros_like(x) + n_channels_tensor = torch.IntTensor([self.hidden_channels]) + + if g is not None and not self.share_cond_layers: + g = self.cond_layer(g) + + for i in range(self.n_layers): + x_in = self.in_layers[i](x) + x_in = self.drop(x_in) + if g is not None: + cond_offset = i * 2 * self.hidden_channels + g_l = g[:, cond_offset:cond_offset + 2 * self.hidden_channels, :] + else: + g_l = torch.zeros_like(x_in) + + acts = fused_add_tanh_sigmoid_multiply(x_in, g_l, n_channels_tensor) + + res_skip_acts = self.res_skip_layers[i](acts) + if i < self.n_layers - 1: + x = (x + res_skip_acts[:, :self.hidden_channels, :]) * x_mask + output = output + res_skip_acts[:, self.hidden_channels:, :] + else: + output = output + res_skip_acts + return output * x_mask + + def remove_weight_norm(self): + def remove_weight_norm(m): + try: + nn.utils.remove_weight_norm(m) + except ValueError: # this module didn't have weight norm + return + + self.apply(remove_weight_norm) diff --git a/NeuralSeq/modules/GenerSpeech/task/dataset.py b/NeuralSeq/modules/GenerSpeech/task/dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..0b1f6fa2c596a23b9bc49ad470c1723d3597d738 --- /dev/null +++ b/NeuralSeq/modules/GenerSpeech/task/dataset.py @@ -0,0 +1,193 @@ +import matplotlib +matplotlib.use('Agg') +from tasks.base_task import data_loader +from tasks.tts.fs2 import FastSpeech2Task +from tasks.tts.dataset_utils import FastSpeechDataset, BaseTTSDataset +import glob +import importlib +from utils.pitch_utils import norm_interp_f0, denorm_f0, f0_to_coarse +from inference.base_tts_infer import load_data_preprocessor +from data_gen.tts.emotion import inference as EmotionEncoder +from data_gen.tts.emotion.inference import embed_utterance as Embed_utterance +from data_gen.tts.emotion.inference import preprocess_wav +from tqdm import tqdm +from utils.hparams import hparams +from data_gen.tts.data_gen_utils import build_phone_encoder, build_word_encoder +import random +import torch +import torch.optim +import torch.nn.functional as F +import torch.utils.data +from utils.indexed_datasets import IndexedDataset +from resemblyzer import VoiceEncoder +import torch.distributions +import numpy as np +import utils +import os + + + +class GenerSpeech_dataset(BaseTTSDataset): + def __init__(self, prefix, shuffle=False, test_items=None, test_sizes=None, data_dir=None): + super().__init__(prefix, shuffle, test_items, test_sizes, data_dir) + self.f0_mean, self.f0_std = hparams.get('f0_mean', None), hparams.get('f0_std', None) + if prefix == 'valid': + indexed_ds = IndexedDataset(f'{self.data_dir}/train') + sizes = 
np.load(f'{self.data_dir}/train_lengths.npy') + index = [i for i in range(len(indexed_ds))] + random.shuffle(index) + index = index[:300] + self.sizes = sizes[index] + self.indexed_ds = [] + for i in index: + self.indexed_ds.append(indexed_ds[i]) + self.avail_idxs = list(range(len(self.sizes))) + if hparams['min_frames'] > 0: + self.avail_idxs = [x for x in self.avail_idxs if self.sizes[x] >= hparams['min_frames']] + self.sizes = [self.sizes[i] for i in self.avail_idxs] + + if prefix == 'test' and hparams['test_input_dir'] != '': + self.preprocessor, self.preprocess_args = load_data_preprocessor() + self.indexed_ds, self.sizes = self.load_test_inputs(hparams['test_input_dir']) + self.avail_idxs = [i for i, _ in enumerate(self.sizes)] + + + def load_test_inputs(self, test_input_dir): + inp_wav_paths = sorted(glob.glob(f'{test_input_dir}/*.wav') + glob.glob(f'{test_input_dir}/*.mp3')) + binarizer_cls = hparams.get("binarizer_cls", 'data_gen.tts.base_binarizerr.BaseBinarizer') + pkg = ".".join(binarizer_cls.split(".")[:-1]) + cls_name = binarizer_cls.split(".")[-1] + binarizer_cls = getattr(importlib.import_module(pkg), cls_name) + + phone_encoder = build_phone_encoder(hparams['binary_data_dir']) + word_encoder = build_word_encoder(hparams['binary_data_dir']) + voice_encoder = VoiceEncoder().cuda() + + encoder = [phone_encoder, word_encoder] + sizes = [] + items = [] + EmotionEncoder.load_model(hparams['emotion_encoder_path']) + preprocessor, preprocess_args = self.preprocessor, self.preprocess_args + + for wav_fn in tqdm(inp_wav_paths): + item_name = wav_fn[len(test_input_dir) + 1:].replace("/", "_") + spk_id = emotion = 0 + item2tgfn = wav_fn.replace('.wav', '.TextGrid') # prepare textgrid alignment + txtpath = wav_fn.replace('.wav', '.txt') # prepare text + with open(txtpath, 'r') as f: + text_raw = f.readlines() + f.close() + ph, txt = preprocessor.txt_to_ph(preprocessor.txt_processor, text_raw[0], preprocess_args) + + item = binarizer_cls.process_item(item_name, ph, txt, item2tgfn, wav_fn, spk_id, emotion, encoder, hparams['binarization_args']) + item['emo_embed'] = Embed_utterance(preprocess_wav(item['wav_fn'])) + item['spk_embed'] = voice_encoder.embed_utterance(item['wav']) + items.append(item) + sizes.append(item['len']) + return items, sizes + + def _get_item(self, index): + if hasattr(self, 'avail_idxs') and self.avail_idxs is not None: + index = self.avail_idxs[index] + if self.indexed_ds is None: + self.indexed_ds = IndexedDataset(f'{self.data_dir}/{self.prefix}') + return self.indexed_ds[index] + + def __getitem__(self, index): + hparams = self.hparams + item = self._get_item(index) + assert len(item['mel']) == self.sizes[index], (len(item['mel']), self.sizes[index]) + max_frames = hparams['max_frames'] + spec = torch.Tensor(item['mel'])[:max_frames] + max_frames = spec.shape[0] // hparams['frames_multiple'] * hparams['frames_multiple'] + spec = spec[:max_frames] + phone = torch.LongTensor(item['phone'][:hparams['max_input_tokens']]) + sample = { + "id": index, + "item_name": item['item_name'], + "text": item['txt'], + "txt_token": phone, + "mel": spec, + "mel_nonpadding": spec.abs().sum(-1) > 0, + } + spec = sample['mel'] + T = spec.shape[0] + sample['mel2ph'] = mel2ph = torch.LongTensor(item['mel2ph'])[:T] if 'mel2ph' in item else None + if hparams['use_pitch_embed']: + assert 'f0' in item + if hparams.get('normalize_pitch', False): + f0 = item["f0"] + if len(f0 > 0) > 0 and f0[f0 > 0].std() > 0: + f0[f0 > 0] = (f0[f0 > 0] - f0[f0 > 0].mean()) / f0[f0 > 0].std() * 
hparams['f0_std'] + \ + hparams['f0_mean'] + f0[f0 > 0] = f0[f0 > 0].clip(min=60, max=500) + pitch = f0_to_coarse(f0) + pitch = torch.LongTensor(pitch[:max_frames]) + else: + pitch = torch.LongTensor(item.get("pitch"))[:max_frames] if "pitch" in item else None + f0, uv = norm_interp_f0(item["f0"][:max_frames], hparams) + uv = torch.FloatTensor(uv) + f0 = torch.FloatTensor(f0) + else: + f0 = uv = torch.zeros_like(mel2ph) + pitch = None + sample["f0"], sample["uv"], sample["pitch"] = f0, uv, pitch + sample["spk_embed"] = torch.Tensor(item['spk_embed']) + sample["emotion"] = item['emotion'] + sample["emo_embed"] = torch.Tensor(item['emo_embed']) + + if hparams.get('use_word', False): + sample["ph_words"] = item["ph_words"] + sample["word_tokens"] = torch.LongTensor(item["word_tokens"]) + sample["mel2word"] = torch.LongTensor(item.get("mel2word"))[:max_frames] + sample["ph2word"] = torch.LongTensor(item['ph2word'][:hparams['max_input_tokens']]) + return sample + + def collater(self, samples): + if len(samples) == 0: + return {} + hparams = self.hparams + id = torch.LongTensor([s['id'] for s in samples]) + item_names = [s['item_name'] for s in samples] + text = [s['text'] for s in samples] + txt_tokens = utils.collate_1d([s['txt_token'] for s in samples], 0) + mels = utils.collate_2d([s['mel'] for s in samples], 0.0) + txt_lengths = torch.LongTensor([s['txt_token'].numel() for s in samples]) + mel_lengths = torch.LongTensor([s['mel'].shape[0] for s in samples]) + + batch = { + 'id': id, + 'item_name': item_names, + 'nsamples': len(samples), + 'text': text, + 'txt_tokens': txt_tokens, + 'txt_lengths': txt_lengths, + 'mels': mels, + 'mel_lengths': mel_lengths, + } + + f0 = utils.collate_1d([s['f0'] for s in samples], 0.0) + pitch = utils.collate_1d([s['pitch'] for s in samples]) if samples[0]['pitch'] is not None else None + uv = utils.collate_1d([s['uv'] for s in samples]) + mel2ph = utils.collate_1d([s['mel2ph'] for s in samples], 0.0) if samples[0]['mel2ph'] is not None else None + batch.update({ + 'mel2ph': mel2ph, + 'pitch': pitch, + 'f0': f0, + 'uv': uv, + }) + spk_embed = torch.stack([s['spk_embed'] for s in samples]) + batch['spk_embed'] = spk_embed + emo_embed = torch.stack([s['emo_embed'] for s in samples]) + batch['emo_embed'] = emo_embed + + if hparams.get('use_word', False): + ph_words = [s['ph_words'] for s in samples] + batch['ph_words'] = ph_words + word_tokens = utils.collate_1d([s['word_tokens'] for s in samples], 0) + batch['word_tokens'] = word_tokens + mel2word = utils.collate_1d([s['mel2word'] for s in samples], 0) + batch['mel2word'] = mel2word + ph2word = utils.collate_1d([s['ph2word'] for s in samples], 0) + batch['ph2word'] = ph2word + return batch \ No newline at end of file diff --git a/NeuralSeq/modules/GenerSpeech/task/generspeech.py b/NeuralSeq/modules/GenerSpeech/task/generspeech.py new file mode 100644 index 0000000000000000000000000000000000000000..7536bf651ebcfeef3df97c752b4801c487671e95 --- /dev/null +++ b/NeuralSeq/modules/GenerSpeech/task/generspeech.py @@ -0,0 +1,271 @@ +import matplotlib +matplotlib.use('Agg') +from data_gen.tts.data_gen_utils import get_pitch +from modules.fastspeech.tts_modules import mel2ph_to_dur +import matplotlib.pyplot as plt +from utils import audio +from utils.pitch_utils import norm_interp_f0, denorm_f0, f0_to_coarse +from vocoders.base_vocoder import get_vocoder_cls +import json +from utils.plot import spec_to_figure +from utils.hparams import hparams +import torch +import torch.optim +import torch.nn.functional as F +import 
torch.utils.data +from modules.GenerSpeech.task.dataset import GenerSpeech_dataset +from modules.GenerSpeech.model.generspeech import GenerSpeech +import torch.distributions +import numpy as np +from utils.tts_utils import select_attn +import utils +import os +from tasks.tts.fs2 import FastSpeech2Task + +class GenerSpeechTask(FastSpeech2Task): + def __init__(self): + super(GenerSpeechTask, self).__init__() + self.dataset_cls = GenerSpeech_dataset + + def build_tts_model(self): + self.model = GenerSpeech(self.phone_encoder) + + def build_model(self): + self.build_tts_model() + if hparams['load_ckpt'] != '': + self.load_ckpt(hparams['load_ckpt'], strict=False) + utils.num_params(self.model) + return self.model + + def run_model(self, model, sample, return_output=False): + txt_tokens = sample['txt_tokens'] # [B, T_t] + target = sample['mels'] # [B, T_s, 80] + mel2ph = sample['mel2ph'] # [B, T_s] + mel2word = sample['mel2word'] + f0 = sample['f0'] # [B, T_s] + uv = sample['uv'] # [B, T_s] 0/1 + + spk_embed = sample.get('spk_embed') if not hparams['use_spk_id'] else sample.get('spk_ids') + emo_embed = sample.get('emo_embed') + output = model(txt_tokens, mel2ph=mel2ph, ref_mel2ph=mel2ph, ref_mel2word=mel2word, spk_embed=spk_embed, emo_embed=emo_embed, + ref_mels=target, f0=f0, uv=uv, tgt_mels=target, global_steps=self.global_step, infer=False) + losses = {} + losses['postflow'] = output['postflow'] + if self.global_step > hparams['forcing']: + losses['gloss'] = (output['gloss_utter'] + output['gloss_ph'] + output['gloss_word']) / 3 + if self.global_step > hparams['vq_start']: + losses['vq_loss'] = (output['vq_loss_utter'] + output['vq_loss_ph'] + output['vq_loss_word']) / 3 + losses['ppl_utter'] = output['ppl_utter'] + losses['ppl_ph'] = output['ppl_ph'] + losses['ppl_word'] = output['ppl_word'] + self.add_mel_loss(output['mel_out'], target, losses) + self.add_dur_loss(output['dur'], mel2ph, txt_tokens, losses=losses) + if hparams['use_pitch_embed']: + self.add_pitch_loss(output, sample, losses) + output['select_attn'] = select_attn(output['attn_ph']) + + if not return_output: + return losses + else: + return losses, output + + def validation_step(self, sample, batch_idx): + outputs = {} + outputs['losses'] = {} + outputs['losses'], model_out = self.run_model(self.model, sample, return_output=True) + outputs['total_loss'] = sum(outputs['losses'].values()) + outputs['nsamples'] = sample['nsamples'] + encdec_attn = model_out['select_attn'] + mel_out = self.model.out2mel(model_out['mel_out']) + outputs = utils.tensors_to_scalars(outputs) + if self.global_step % hparams['valid_infer_interval'] == 0 \ + and batch_idx < hparams['num_valid_plots']: + vmin = hparams['mel_vmin'] + vmax = hparams['mel_vmax'] + self.plot_mel(batch_idx, sample['mels'], mel_out) + self.plot_dur(batch_idx, sample, model_out) + if hparams['use_pitch_embed']: + self.plot_pitch(batch_idx, sample, model_out) + if self.vocoder is None: + self.vocoder = get_vocoder_cls(hparams)() + if self.global_step > 0: + spk_embed = sample.get('spk_embed') if not hparams['use_spk_id'] else sample.get('spk_ids') + emo_embed = sample.get('emo_embed') + ref_mels = sample['mels'] + mel2ph = sample['mel2ph'] # [B, T_s] + mel2word = sample['mel2word'] + # with gt duration + model_out = self.model(sample['txt_tokens'], mel2ph=mel2ph, ref_mel2ph=mel2ph, ref_mel2word=mel2word, spk_embed=spk_embed, + emo_embed=emo_embed, ref_mels=ref_mels, global_steps=self.global_step, infer=True) + wav_pred = self.vocoder.spec2wav(model_out['mel_out'][0].cpu()) + 
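+                # log the synthesis that used ground-truth durations (mel2ph) so it can be
+                # compared against the free-running prediction logged below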
self.logger.add_audio(f'wav_gtdur_{batch_idx}', wav_pred, self.global_step, + hparams['audio_sample_rate']) + self.logger.add_figure(f'ali_{batch_idx}', spec_to_figure(encdec_attn[0]), self.global_step) + self.logger.add_figure( + f'mel_gtdur_{batch_idx}', + spec_to_figure(model_out['mel_out'][0], vmin, vmax), self.global_step) + # with pred duration + model_out = self.model(sample['txt_tokens'], ref_mel2ph=mel2ph, ref_mel2word=mel2word, spk_embed=spk_embed, emo_embed=emo_embed, ref_mels=ref_mels, + global_steps=self.global_step, infer=True) + self.logger.add_figure( + f'mel_{batch_idx}', + spec_to_figure(model_out['mel_out'][0], vmin, vmax), self.global_step) + wav_pred = self.vocoder.spec2wav(model_out['mel_out'][0].cpu()) + self.logger.add_audio(f'wav_{batch_idx}', wav_pred, self.global_step, hparams['audio_sample_rate']) + # gt wav + if self.global_step <= hparams['valid_infer_interval']: + mel_gt = sample['mels'][0].cpu() + wav_gt = self.vocoder.spec2wav(mel_gt) + self.logger.add_audio(f'wav_gt_{batch_idx}', wav_gt, self.global_step, 22050) + return outputs + + ############ + # infer + ############ + def test_step(self, sample, batch_idx): + spk_embed = sample.get('spk_embed') if not hparams['use_spk_id'] else sample.get('spk_ids') + emo_embed = sample.get('emo_embed') + txt_tokens = sample['txt_tokens'] + mel2ph, uv, f0 = None, None, None + ref_mel2word = sample['mel2word'] + ref_mel2ph = sample['mel2ph'] + ref_mels = sample['mels'] + if hparams['use_gt_dur']: + mel2ph = sample['mel2ph'] + if hparams['use_gt_f0']: + f0 = sample['f0'] + uv = sample['uv'] + global_steps = 200000 + run_model = lambda: self.model( + txt_tokens, spk_embed=spk_embed, emo_embed=emo_embed, mel2ph=mel2ph, ref_mel2ph=ref_mel2ph, ref_mel2word=ref_mel2word, + f0=f0, uv=uv, ref_mels=ref_mels, global_steps=global_steps, infer=True) + outputs = run_model() + sample['outputs'] = self.model.out2mel(outputs['mel_out']) + sample['mel2ph_pred'] = outputs['mel2ph'] + if hparams['use_pitch_embed']: + sample['f0'] = denorm_f0(sample['f0'], sample['uv'], hparams) + if hparams['pitch_type'] == 'ph': + sample['f0'] = torch.gather(F.pad(sample['f0'], [1, 0]), 1, sample['mel2ph']) + sample['f0_pred'] = outputs.get('f0_denorm') + + return self.after_infer(sample) + + + + def after_infer(self, predictions, sil_start_frame=0): + predictions = utils.unpack_dict_to_list(predictions) + assert len(predictions) == 1, 'Only support batch_size=1 in inference.' 
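+        # inference runs with batch_size=1; unpack the single item and convert its
+        # tensors to numpy before saving audio and plots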
+ prediction = predictions[0] + prediction = utils.tensors_to_np(prediction) + item_name = prediction.get('item_name') + text = prediction.get('text') + ph_tokens = prediction.get('txt_tokens') + mel_gt = prediction["mels"] + mel2ph_gt = prediction.get("mel2ph") + mel2ph_gt = mel2ph_gt if mel2ph_gt is not None else None + mel_pred = prediction["outputs"] + mel2ph_pred = prediction.get("mel2ph_pred") + f0_gt = prediction.get("f0") + f0_pred = prediction.get("f0_pred") + + str_phs = None + if self.phone_encoder is not None and 'txt_tokens' in prediction: + str_phs = self.phone_encoder.decode(prediction['txt_tokens'], strip_padding=True) + + if 'encdec_attn' in prediction: + encdec_attn = prediction['encdec_attn'] # (1, Tph, Tmel) + encdec_attn = encdec_attn[encdec_attn.max(-1).sum(-1).argmax(-1)] + txt_lengths = prediction.get('txt_lengths') + encdec_attn = encdec_attn.T[:, :txt_lengths] + else: + encdec_attn = None + + wav_pred = self.vocoder.spec2wav(mel_pred, f0=f0_pred) + wav_pred[:sil_start_frame * hparams['hop_size']] = 0 + gen_dir = self.gen_dir + base_fn = f'[{self.results_id:06d}][{item_name}][%s]' + # if text is not None: + # base_fn += text.replace(":", "%3A")[:80] + base_fn = base_fn.replace(' ', '_') + if not hparams['profile_infer']: + os.makedirs(gen_dir, exist_ok=True) + os.makedirs(f'{gen_dir}/wavs', exist_ok=True) + os.makedirs(f'{gen_dir}/plot', exist_ok=True) + if hparams.get('save_mel_npy', False): + os.makedirs(f'{gen_dir}/npy', exist_ok=True) + if 'encdec_attn' in prediction: + os.makedirs(f'{gen_dir}/attn_plot', exist_ok=True) + self.saving_results_futures.append( + self.saving_result_pool.apply_async(self.save_result, args=[ + wav_pred, mel_pred, base_fn % 'TTS', gen_dir, str_phs, mel2ph_pred, encdec_attn])) + + if mel_gt is not None and hparams['save_gt']: + wav_gt = self.vocoder.spec2wav(mel_gt, f0=f0_gt) + self.saving_results_futures.append( + self.saving_result_pool.apply_async(self.save_result, args=[ + wav_gt, mel_gt, base_fn % 'Ref', gen_dir, str_phs, mel2ph_gt])) + if hparams['save_f0']: + import matplotlib.pyplot as plt + f0_pred_, _ = get_pitch(wav_pred, mel_pred, hparams) + f0_gt_, _ = get_pitch(wav_gt, mel_gt, hparams) + fig = plt.figure() + plt.plot(f0_pred_, label=r'$\hat{f_0}$') + plt.plot(f0_gt_, label=r'$f_0$') + plt.legend() + plt.tight_layout() + plt.savefig(f'{gen_dir}/plot/[F0][{item_name}]{text}.png', format='png') + plt.close(fig) + + print(f"Pred_shape: {mel_pred.shape}, gt_shape: {mel_gt.shape}") + self.results_id += 1 + return { + 'item_name': item_name, + 'text': text, + 'ph_tokens': self.phone_encoder.decode(ph_tokens.tolist()), + 'wav_fn_pred': base_fn % 'TTS', + 'wav_fn_gt': base_fn % 'Ref', + } + + + + @staticmethod + def save_result(wav_out, mel, base_fn, gen_dir, str_phs=None, mel2ph=None, alignment=None): + audio.save_wav(wav_out, f'{gen_dir}/wavs/{base_fn}.wav', hparams['audio_sample_rate'], + norm=hparams['out_wav_norm']) + fig = plt.figure(figsize=(14, 10)) + spec_vmin = hparams['mel_vmin'] + spec_vmax = hparams['mel_vmax'] + heatmap = plt.pcolor(mel.T, vmin=spec_vmin, vmax=spec_vmax) + fig.colorbar(heatmap) + f0, _ = get_pitch(wav_out, mel, hparams) + f0 = f0 / 10 * (f0 > 0) + plt.plot(f0, c='white', linewidth=1, alpha=0.6) + if mel2ph is not None and str_phs is not None: + decoded_txt = str_phs.split(" ") + dur = mel2ph_to_dur(torch.LongTensor(mel2ph)[None, :], len(decoded_txt))[0].numpy() + dur = [0] + list(np.cumsum(dur)) + for i in range(len(dur) - 1): + shift = (i % 20) + 1 + plt.text(dur[i], shift, decoded_txt[i]) + 
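+                # stagger the phoneme labels vertically (shift cycles through 1..20) so
+                # neighbouring labels do not overlap on the spectrogram plot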
plt.hlines(shift, dur[i], dur[i + 1], colors='b' if decoded_txt[i] != '|' else 'black') + plt.vlines(dur[i], 0, 5, colors='b' if decoded_txt[i] != '|' else 'black', + alpha=1, linewidth=1) + plt.tight_layout() + plt.savefig(f'{gen_dir}/plot/{base_fn}.png', format='png') + plt.close(fig) + if hparams.get('save_mel_npy', False): + np.save(f'{gen_dir}/npy/{base_fn}', mel) + if alignment is not None: + fig, ax = plt.subplots(figsize=(12, 16)) + im = ax.imshow(alignment, aspect='auto', origin='lower', + interpolation='none') + ax.set_xticks(np.arange(0, alignment.shape[1], 5)) + ax.set_yticks(np.arange(0, alignment.shape[0], 10)) + ax.set_ylabel("$S_p$ index") + ax.set_xlabel("$H_c$ index") + fig.colorbar(im, ax=ax) + fig.savefig(f'{gen_dir}/attn_plot/{base_fn}_attn.png', format='png') + plt.close(fig) + + + diff --git a/NeuralSeq/modules/__init__.py b/NeuralSeq/modules/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/NeuralSeq/modules/__pycache__/__init__.cpython-37.pyc b/NeuralSeq/modules/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f34d33c24157b9d3332a2728c36005b6e92b8dac Binary files /dev/null and b/NeuralSeq/modules/__pycache__/__init__.cpython-37.pyc differ diff --git a/NeuralSeq/modules/__pycache__/__init__.cpython-38.pyc b/NeuralSeq/modules/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e8a11d844e87fc91b95dff9bfc5c730261213582 Binary files /dev/null and b/NeuralSeq/modules/__pycache__/__init__.cpython-38.pyc differ diff --git a/NeuralSeq/modules/commons/__pycache__/common_layers.cpython-37.pyc b/NeuralSeq/modules/commons/__pycache__/common_layers.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bdb37d2ad6351932f028bc3648d92b004865b481 Binary files /dev/null and b/NeuralSeq/modules/commons/__pycache__/common_layers.cpython-37.pyc differ diff --git a/NeuralSeq/modules/commons/__pycache__/common_layers.cpython-38.pyc b/NeuralSeq/modules/commons/__pycache__/common_layers.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7672903cc9cebc26885054c339c00b5b70a1ea15 Binary files /dev/null and b/NeuralSeq/modules/commons/__pycache__/common_layers.cpython-38.pyc differ diff --git a/NeuralSeq/modules/commons/__pycache__/espnet_positional_embedding.cpython-37.pyc b/NeuralSeq/modules/commons/__pycache__/espnet_positional_embedding.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7d248158e423ac4015e60e5e83c90dab64cb1e91 Binary files /dev/null and b/NeuralSeq/modules/commons/__pycache__/espnet_positional_embedding.cpython-37.pyc differ diff --git a/NeuralSeq/modules/commons/__pycache__/espnet_positional_embedding.cpython-38.pyc b/NeuralSeq/modules/commons/__pycache__/espnet_positional_embedding.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2fb73865ef69ebb9ba72131f3e643aaaef81a5b6 Binary files /dev/null and b/NeuralSeq/modules/commons/__pycache__/espnet_positional_embedding.cpython-38.pyc differ diff --git a/NeuralSeq/modules/commons/__pycache__/ssim.cpython-37.pyc b/NeuralSeq/modules/commons/__pycache__/ssim.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2b276e0758b9a898a3032c71dd2805236c5ad9f0 Binary files /dev/null and b/NeuralSeq/modules/commons/__pycache__/ssim.cpython-37.pyc differ diff --git 
a/NeuralSeq/modules/commons/__pycache__/ssim.cpython-38.pyc b/NeuralSeq/modules/commons/__pycache__/ssim.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5a3903d17d9eac07d557acc94182ef93e0b7c22e Binary files /dev/null and b/NeuralSeq/modules/commons/__pycache__/ssim.cpython-38.pyc differ diff --git a/NeuralSeq/modules/commons/align_ops.py b/NeuralSeq/modules/commons/align_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..a190d63a3f3ba31f41754975569336a87c63089d --- /dev/null +++ b/NeuralSeq/modules/commons/align_ops.py @@ -0,0 +1,25 @@ +import torch +import torch.nn.functional as F + + +def build_word_mask(x2word, y2word): + return (x2word[:, :, None] == y2word[:, None, :]).long() + + +def mel2ph_to_mel2word(mel2ph, ph2word): + mel2word = (ph2word - 1).gather(1, (mel2ph - 1).clamp(min=0)) + 1 + mel2word = mel2word * (mel2ph > 0).long() + return mel2word + + +def clip_mel2token_to_multiple(mel2token, frames_multiple): + max_frames = mel2token.shape[1] // frames_multiple * frames_multiple + mel2token = mel2token[:, :max_frames] + return mel2token + + +def expand_states(h, mel2token): + h = F.pad(h, [0, 0, 1, 0]) + mel2token_ = mel2token[..., None].repeat([1, 1, h.shape[-1]]) + h = torch.gather(h, 1, mel2token_) # [B, T, H] + return h diff --git a/NeuralSeq/modules/commons/common_layers.py b/NeuralSeq/modules/commons/common_layers.py new file mode 100644 index 0000000000000000000000000000000000000000..0657b0dcb3cb00340e8b354bd4a196c527aa668e --- /dev/null +++ b/NeuralSeq/modules/commons/common_layers.py @@ -0,0 +1,667 @@ +import math +import torch +from torch import nn +from torch.nn import Parameter +import torch.onnx.operators +import torch.nn.functional as F +import utils + + +class Reshape(nn.Module): + def __init__(self, *args): + super(Reshape, self).__init__() + self.shape = args + + def forward(self, x): + return x.view(self.shape) + + +class Permute(nn.Module): + def __init__(self, *args): + super(Permute, self).__init__() + self.args = args + + def forward(self, x): + return x.permute(self.args) + + +class LinearNorm(torch.nn.Module): + def __init__(self, in_dim, out_dim, bias=True, w_init_gain='linear'): + super(LinearNorm, self).__init__() + self.linear_layer = torch.nn.Linear(in_dim, out_dim, bias=bias) + + torch.nn.init.xavier_uniform_( + self.linear_layer.weight, + gain=torch.nn.init.calculate_gain(w_init_gain)) + + def forward(self, x): + return self.linear_layer(x) + + +class ConvNorm(torch.nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, + padding=None, dilation=1, bias=True, w_init_gain='linear'): + super(ConvNorm, self).__init__() + if padding is None: + assert (kernel_size % 2 == 1) + padding = int(dilation * (kernel_size - 1) / 2) + + self.conv = torch.nn.Conv1d(in_channels, out_channels, + kernel_size=kernel_size, stride=stride, + padding=padding, dilation=dilation, + bias=bias) + + torch.nn.init.xavier_uniform_( + self.conv.weight, gain=torch.nn.init.calculate_gain(w_init_gain)) + + def forward(self, signal): + conv_signal = self.conv(signal) + return conv_signal + + +def Embedding(num_embeddings, embedding_dim, padding_idx=None): + m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx) + nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5) + if padding_idx is not None: + nn.init.constant_(m.weight[padding_idx], 0) + return m + + +def LayerNorm(normalized_shape, eps=1e-5, elementwise_affine=True, export=False): + if not export and 
torch.cuda.is_available(): + try: + from apex.normalization import FusedLayerNorm + return FusedLayerNorm(normalized_shape, eps, elementwise_affine) + except ImportError: + pass + return torch.nn.LayerNorm(normalized_shape, eps, elementwise_affine) + +def Linear(in_features, out_features, bias=True): + m = nn.Linear(in_features, out_features, bias) + nn.init.xavier_uniform_(m.weight) + if bias: + nn.init.constant_(m.bias, 0.) + return m + + +class SinusoidalPositionalEmbedding(nn.Module): + """This module produces sinusoidal positional embeddings of any length. + + Padding symbols are ignored. + """ + + def __init__(self, embedding_dim, padding_idx, init_size=1024): + super().__init__() + self.embedding_dim = embedding_dim + self.padding_idx = padding_idx + self.weights = SinusoidalPositionalEmbedding.get_embedding( + init_size, + embedding_dim, + padding_idx, + ) + self.register_buffer('_float_tensor', torch.FloatTensor(1)) + + @staticmethod + def get_embedding(num_embeddings, embedding_dim, padding_idx=None): + """Build sinusoidal embeddings. + + This matches the implementation in tensor2tensor, but differs slightly + from the description in Section 3.5 of "Attention Is All You Need". + """ + half_dim = embedding_dim // 2 + emb = math.log(10000) / (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb) + emb = torch.arange(num_embeddings, dtype=torch.float).unsqueeze(1) * emb.unsqueeze(0) + emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1) + if embedding_dim % 2 == 1: + # zero pad + emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1) + if padding_idx is not None: + emb[padding_idx, :] = 0 + return emb + + def forward(self, input, incremental_state=None, timestep=None, positions=None, **kwargs): + """Input is expected to be of size [bsz x seqlen].""" + bsz, seq_len = input.shape[:2] + max_pos = self.padding_idx + 1 + seq_len + if self.weights is None or max_pos > self.weights.size(0): + # recompute/expand embeddings if needed + self.weights = SinusoidalPositionalEmbedding.get_embedding( + max_pos, + self.embedding_dim, + self.padding_idx, + ) + self.weights = self.weights.to(self._float_tensor) + + if incremental_state is not None: + # positions is the same for every token when decoding a single step + pos = timestep.view(-1)[0] + 1 if timestep is not None else seq_len + return self.weights[self.padding_idx + pos, :].expand(bsz, 1, -1) + + positions = utils.make_positions(input, self.padding_idx) if positions is None else positions + return self.weights.index_select(0, positions.view(-1)).view(bsz, seq_len, -1).detach() + + def max_positions(self): + """Maximum number of supported positions.""" + return int(1e5) # an arbitrary large number + + +class ConvTBC(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size, padding=0): + super(ConvTBC, self).__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.kernel_size = kernel_size + self.padding = padding + + self.weight = torch.nn.Parameter(torch.Tensor( + self.kernel_size, in_channels, out_channels)) + self.bias = torch.nn.Parameter(torch.Tensor(out_channels)) + + def forward(self, input): + return torch.conv_tbc(input.contiguous(), self.weight, self.bias, self.padding) + + +class MultiheadAttention(nn.Module): + def __init__(self, embed_dim, num_heads, kdim=None, vdim=None, dropout=0., bias=True, + add_bias_kv=False, add_zero_attn=False, self_attention=False, + encoder_decoder_attention=False): + super().__init__() + 
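+        # when q/k/v share the same dimensionality a single fused in_proj_weight is used;
+        # otherwise separate per-projection weight matrices are created below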
self.embed_dim = embed_dim + self.kdim = kdim if kdim is not None else embed_dim + self.vdim = vdim if vdim is not None else embed_dim + self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim + + self.num_heads = num_heads + self.dropout = dropout + self.head_dim = embed_dim // num_heads + assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads" + self.scaling = self.head_dim ** -0.5 + + self.self_attention = self_attention + self.encoder_decoder_attention = encoder_decoder_attention + + assert not self.self_attention or self.qkv_same_dim, 'Self-attention requires query, key and ' \ + 'value to be of the same size' + + if self.qkv_same_dim: + self.in_proj_weight = Parameter(torch.Tensor(3 * embed_dim, embed_dim)) + else: + self.k_proj_weight = Parameter(torch.Tensor(embed_dim, self.kdim)) + self.v_proj_weight = Parameter(torch.Tensor(embed_dim, self.vdim)) + self.q_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim)) + + if bias: + self.in_proj_bias = Parameter(torch.Tensor(3 * embed_dim)) + else: + self.register_parameter('in_proj_bias', None) + + self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + + if add_bias_kv: + self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim)) + self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim)) + else: + self.bias_k = self.bias_v = None + + self.add_zero_attn = add_zero_attn + + self.reset_parameters() + + self.enable_torch_version = False + if hasattr(F, "multi_head_attention_forward"): + self.enable_torch_version = True + else: + self.enable_torch_version = False + self.last_attn_probs = None + + def reset_parameters(self): + if self.qkv_same_dim: + nn.init.xavier_uniform_(self.in_proj_weight) + else: + nn.init.xavier_uniform_(self.k_proj_weight) + nn.init.xavier_uniform_(self.v_proj_weight) + nn.init.xavier_uniform_(self.q_proj_weight) + + nn.init.xavier_uniform_(self.out_proj.weight) + if self.in_proj_bias is not None: + nn.init.constant_(self.in_proj_bias, 0.) + nn.init.constant_(self.out_proj.bias, 0.) + if self.bias_k is not None: + nn.init.xavier_normal_(self.bias_k) + if self.bias_v is not None: + nn.init.xavier_normal_(self.bias_v) + + def forward( + self, + query, key, value, + key_padding_mask=None, + incremental_state=None, + need_weights=True, + static_kv=False, + attn_mask=None, + before_softmax=False, + need_head_weights=False, + enc_dec_attn_constraint_mask=None, + reset_attn_weight=None + ): + """Input shape: Time x Batch x Channel + + Args: + key_padding_mask (ByteTensor, optional): mask to exclude + keys that are pads, of shape `(batch, src_len)`, where + padding elements are indicated by 1s. + need_weights (bool, optional): return the attention weights, + averaged over heads (default: False). + attn_mask (ByteTensor, optional): typically used to + implement causal attention, where the mask prevents the + attention from looking forward in time (default: None). + before_softmax (bool, optional): return the raw attention + weights and values before the attention softmax. + need_head_weights (bool, optional): return the attention + weights for each head. Implies *need_weights*. Default: + return the average attention weights over all heads. 
+ """ + if need_head_weights: + need_weights = True + + tgt_len, bsz, embed_dim = query.size() + assert embed_dim == self.embed_dim + assert list(query.size()) == [tgt_len, bsz, embed_dim] + + if self.enable_torch_version and incremental_state is None and not static_kv and reset_attn_weight is None: + if self.qkv_same_dim: + return F.multi_head_attention_forward(query, key, value, + self.embed_dim, self.num_heads, + self.in_proj_weight, + self.in_proj_bias, self.bias_k, self.bias_v, + self.add_zero_attn, self.dropout, + self.out_proj.weight, self.out_proj.bias, + self.training, key_padding_mask, need_weights, + attn_mask) + else: + return F.multi_head_attention_forward(query, key, value, + self.embed_dim, self.num_heads, + torch.empty([0]), + self.in_proj_bias, self.bias_k, self.bias_v, + self.add_zero_attn, self.dropout, + self.out_proj.weight, self.out_proj.bias, + self.training, key_padding_mask, need_weights, + attn_mask, use_separate_proj_weight=True, + q_proj_weight=self.q_proj_weight, + k_proj_weight=self.k_proj_weight, + v_proj_weight=self.v_proj_weight) + + if incremental_state is not None: + print('Not implemented error.') + exit() + else: + saved_state = None + + if self.self_attention: + # self-attention + q, k, v = self.in_proj_qkv(query) + elif self.encoder_decoder_attention: + # encoder-decoder attention + q = self.in_proj_q(query) + if key is None: + assert value is None + k = v = None + else: + k = self.in_proj_k(key) + v = self.in_proj_v(key) + + else: + q = self.in_proj_q(query) + k = self.in_proj_k(key) + v = self.in_proj_v(value) + q *= self.scaling + + if self.bias_k is not None: + assert self.bias_v is not None + k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)]) + v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)]) + if attn_mask is not None: + attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1) + if key_padding_mask is not None: + key_padding_mask = torch.cat( + [key_padding_mask, key_padding_mask.new_zeros(key_padding_mask.size(0), 1)], dim=1) + + q = q.contiguous().view(tgt_len, bsz * self.num_heads, self.head_dim).transpose(0, 1) + if k is not None: + k = k.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1) + if v is not None: + v = v.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1) + + if saved_state is not None: + print('Not implemented error.') + exit() + + src_len = k.size(1) + + # This is part of a workaround to get around fork/join parallelism + # not supporting Optional types. 
+ if key_padding_mask is not None and key_padding_mask.shape == torch.Size([]): + key_padding_mask = None + + if key_padding_mask is not None: + assert key_padding_mask.size(0) == bsz + assert key_padding_mask.size(1) == src_len + + if self.add_zero_attn: + src_len += 1 + k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1) + v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1) + if attn_mask is not None: + attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1) + if key_padding_mask is not None: + key_padding_mask = torch.cat( + [key_padding_mask, torch.zeros(key_padding_mask.size(0), 1).type_as(key_padding_mask)], dim=1) + + attn_weights = torch.bmm(q, k.transpose(1, 2)) + attn_weights = self.apply_sparse_mask(attn_weights, tgt_len, src_len, bsz) + + assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len] + + if attn_mask is not None: + if len(attn_mask.shape) == 2: + attn_mask = attn_mask.unsqueeze(0) + elif len(attn_mask.shape) == 3: + attn_mask = attn_mask[:, None].repeat([1, self.num_heads, 1, 1]).reshape( + bsz * self.num_heads, tgt_len, src_len) + attn_weights = attn_weights + attn_mask + + if enc_dec_attn_constraint_mask is not None: # bs x head x L_kv + attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attn_weights = attn_weights.masked_fill( + enc_dec_attn_constraint_mask.unsqueeze(2).bool(), + -1e9, + ) + attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) + + if key_padding_mask is not None: + # don't attend to padding symbols + attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attn_weights = attn_weights.masked_fill( + key_padding_mask.unsqueeze(1).unsqueeze(2), + -1e9, + ) + attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) + + attn_logits = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + + if before_softmax: + return attn_weights, v + + attn_weights_float = utils.softmax(attn_weights, dim=-1) + attn_weights = attn_weights_float.type_as(attn_weights) + attn_probs = F.dropout(attn_weights_float.type_as(attn_weights), p=self.dropout, training=self.training) + + if reset_attn_weight is not None: + if reset_attn_weight: + self.last_attn_probs = attn_probs.detach() + else: + assert self.last_attn_probs is not None + attn_probs = self.last_attn_probs + attn = torch.bmm(attn_probs, v) + assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim] + attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim) + attn = self.out_proj(attn) + + if need_weights: + attn_weights = attn_weights_float.view(bsz, self.num_heads, tgt_len, src_len).transpose(1, 0) + if not need_head_weights: + # average attention weights over heads + attn_weights = attn_weights.mean(dim=0) + else: + attn_weights = None + + return attn, (attn_weights, attn_logits) + + def in_proj_qkv(self, query): + return self._in_proj(query).chunk(3, dim=-1) + + def in_proj_q(self, query): + if self.qkv_same_dim: + return self._in_proj(query, end=self.embed_dim) + else: + bias = self.in_proj_bias + if bias is not None: + bias = bias[:self.embed_dim] + return F.linear(query, self.q_proj_weight, bias) + + def in_proj_k(self, key): + if self.qkv_same_dim: + return self._in_proj(key, start=self.embed_dim, end=2 * self.embed_dim) + else: + weight = self.k_proj_weight + bias = self.in_proj_bias + if bias is not None: + bias = bias[self.embed_dim:2 * self.embed_dim] + return F.linear(key, weight, bias) + + def in_proj_v(self, 
value): + if self.qkv_same_dim: + return self._in_proj(value, start=2 * self.embed_dim) + else: + weight = self.v_proj_weight + bias = self.in_proj_bias + if bias is not None: + bias = bias[2 * self.embed_dim:] + return F.linear(value, weight, bias) + + def _in_proj(self, input, start=0, end=None): + weight = self.in_proj_weight + bias = self.in_proj_bias + weight = weight[start:end, :] + if bias is not None: + bias = bias[start:end] + return F.linear(input, weight, bias) + + + def apply_sparse_mask(self, attn_weights, tgt_len, src_len, bsz): + return attn_weights + + +class Swish(torch.autograd.Function): + @staticmethod + def forward(ctx, i): + result = i * torch.sigmoid(i) + ctx.save_for_backward(i) + return result + + @staticmethod + def backward(ctx, grad_output): + i = ctx.saved_variables[0] + sigmoid_i = torch.sigmoid(i) + return grad_output * (sigmoid_i * (1 + i * (1 - sigmoid_i))) + + +class CustomSwish(nn.Module): + def forward(self, input_tensor): + return Swish.apply(input_tensor) + + +class TransformerFFNLayer(nn.Module): + def __init__(self, hidden_size, filter_size, padding="SAME", kernel_size=1, dropout=0., act='gelu'): + super().__init__() + self.kernel_size = kernel_size + self.dropout = dropout + self.act = act + if padding == 'SAME': + self.ffn_1 = nn.Conv1d(hidden_size, filter_size, kernel_size, padding=kernel_size // 2) + elif padding == 'LEFT': + self.ffn_1 = nn.Sequential( + nn.ConstantPad1d((kernel_size - 1, 0), 0.0), + nn.Conv1d(hidden_size, filter_size, kernel_size) + ) + self.ffn_2 = Linear(filter_size, hidden_size) + if self.act == 'swish': + self.swish_fn = CustomSwish() + + def forward(self, x, incremental_state=None): + # x: T x B x C + if incremental_state is not None: + assert incremental_state is None, 'Nar-generation does not allow this.' 
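+ # note: the assert above always fails when this branch is taken, so exit(1) is unreachable.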
+ exit(1) + + x = self.ffn_1(x.permute(1, 2, 0)).permute(2, 0, 1) + x = x * self.kernel_size ** -0.5 + + if incremental_state is not None: + x = x[-1:] + if self.act == 'gelu': + x = F.gelu(x) + if self.act == 'relu': + x = F.relu(x) + if self.act == 'swish': + x = self.swish_fn(x) + x = F.dropout(x, self.dropout, training=self.training) + x = self.ffn_2(x) + return x + + +class BatchNorm1dTBC(nn.Module): + def __init__(self, c): + super(BatchNorm1dTBC, self).__init__() + self.bn = nn.BatchNorm1d(c) + + def forward(self, x): + """ + + :param x: [T, B, C] + :return: [T, B, C] + """ + x = x.permute(1, 2, 0) # [B, C, T] + x = self.bn(x) # [B, C, T] + x = x.permute(2, 0, 1) # [T, B, C] + return x + + +class EncSALayer(nn.Module): + def __init__(self, c, num_heads, dropout, attention_dropout=0.1, + relu_dropout=0.1, kernel_size=9, padding='SAME', norm='ln', act='gelu'): + super().__init__() + self.c = c + self.dropout = dropout + self.num_heads = num_heads + if num_heads > 0: + if norm == 'ln': + self.layer_norm1 = LayerNorm(c) + elif norm == 'bn': + self.layer_norm1 = BatchNorm1dTBC(c) + self.self_attn = MultiheadAttention( + self.c, num_heads, self_attention=True, dropout=attention_dropout, bias=False, + ) + if norm == 'ln': + self.layer_norm2 = LayerNorm(c) + elif norm == 'bn': + self.layer_norm2 = BatchNorm1dTBC(c) + self.ffn = TransformerFFNLayer( + c, 4 * c, kernel_size=kernel_size, dropout=relu_dropout, padding=padding, act=act) + + def forward(self, x, encoder_padding_mask=None, **kwargs): + layer_norm_training = kwargs.get('layer_norm_training', None) + if layer_norm_training is not None: + self.layer_norm1.training = layer_norm_training + self.layer_norm2.training = layer_norm_training + if self.num_heads > 0: + residual = x + x = self.layer_norm1(x) + x, _, = self.self_attn( + query=x, + key=x, + value=x, + key_padding_mask=encoder_padding_mask + ) + x = F.dropout(x, self.dropout, training=self.training) + x = residual + x + x = x * (1 - encoder_padding_mask.float()).transpose(0, 1)[..., None] + + residual = x + x = self.layer_norm2(x) + x = self.ffn(x) + x = F.dropout(x, self.dropout, training=self.training) + x = residual + x + x = x * (1 - encoder_padding_mask.float()).transpose(0, 1)[..., None] + return x + + +class DecSALayer(nn.Module): + def __init__(self, c, num_heads, dropout, attention_dropout=0.1, relu_dropout=0.1, kernel_size=9, act='gelu'): + super().__init__() + self.c = c + self.dropout = dropout + self.layer_norm1 = LayerNorm(c) + self.self_attn = MultiheadAttention( + c, num_heads, self_attention=True, dropout=attention_dropout, bias=False + ) + self.layer_norm2 = LayerNorm(c) + self.encoder_attn = MultiheadAttention( + c, num_heads, encoder_decoder_attention=True, dropout=attention_dropout, bias=False, + ) + self.layer_norm3 = LayerNorm(c) + self.ffn = TransformerFFNLayer( + c, 4 * c, padding='LEFT', kernel_size=kernel_size, dropout=relu_dropout, act=act) + + def forward( + self, + x, + encoder_out=None, + encoder_padding_mask=None, + incremental_state=None, + self_attn_mask=None, + self_attn_padding_mask=None, + attn_out=None, + reset_attn_weight=None, + **kwargs, + ): + layer_norm_training = kwargs.get('layer_norm_training', None) + if layer_norm_training is not None: + self.layer_norm1.training = layer_norm_training + self.layer_norm2.training = layer_norm_training + self.layer_norm3.training = layer_norm_training + residual = x + x = self.layer_norm1(x) + x, _ = self.self_attn( + query=x, + key=x, + value=x, + key_padding_mask=self_attn_padding_mask, + 
incremental_state=incremental_state, + attn_mask=self_attn_mask + ) + x = F.dropout(x, self.dropout, training=self.training) + x = residual + x + + residual = x + x = self.layer_norm2(x) + if encoder_out is not None: + x, attn = self.encoder_attn( + query=x, + key=encoder_out, + value=encoder_out, + key_padding_mask=encoder_padding_mask, + incremental_state=incremental_state, + static_kv=True, + enc_dec_attn_constraint_mask=None, #utils.get_incremental_state(self, incremental_state, 'enc_dec_attn_constraint_mask'), + reset_attn_weight=reset_attn_weight + ) + attn_logits = attn[1] + else: + assert attn_out is not None + x = self.encoder_attn.in_proj_v(attn_out.transpose(0, 1)) + attn_logits = None + x = F.dropout(x, self.dropout, training=self.training) + x = residual + x + + residual = x + x = self.layer_norm3(x) + x = self.ffn(x, incremental_state=incremental_state) + x = F.dropout(x, self.dropout, training=self.training) + x = residual + x + # if len(attn_logits.size()) > 3: + # indices = attn_logits.softmax(-1).max(-1).values.sum(-1).argmax(-1) + # attn_logits = attn_logits.gather(1, + # indices[:, None, None, None].repeat(1, 1, attn_logits.size(-2), attn_logits.size(-1))).squeeze(1) + return x, attn_logits diff --git a/NeuralSeq/modules/commons/conv.py b/NeuralSeq/modules/commons/conv.py new file mode 100644 index 0000000000000000000000000000000000000000..a86505f4216231418390610e423e42b3b3b77b15 --- /dev/null +++ b/NeuralSeq/modules/commons/conv.py @@ -0,0 +1,168 @@ +import math +import torch +import torch.nn as nn +import torch.nn.functional as F + +from modules.commons.common_layers import Embedding +from modules.fastspeech.tts_modules import LayerNorm + + +class LambdaLayer(nn.Module): + def __init__(self, lambd): + super(LambdaLayer, self).__init__() + self.lambd = lambd + + def forward(self, x): + return self.lambd(x) + + +def init_weights_func(m): + classname = m.__class__.__name__ + if classname.find("Conv1d") != -1: + torch.nn.init.xavier_uniform_(m.weight) + + +class ResidualBlock(nn.Module): + """Implements conv->PReLU->norm n-times""" + + def __init__(self, channels, kernel_size, dilation, n=2, norm_type='bn', dropout=0.0, + c_multiple=2, ln_eps=1e-12): + super(ResidualBlock, self).__init__() + + if norm_type == 'bn': + norm_builder = lambda: nn.BatchNorm1d(channels) + elif norm_type == 'in': + norm_builder = lambda: nn.InstanceNorm1d(channels, affine=True) + elif norm_type == 'gn': + norm_builder = lambda: nn.GroupNorm(8, channels) + elif norm_type == 'ln': + norm_builder = lambda: LayerNorm(channels, dim=1, eps=ln_eps) + else: + norm_builder = lambda: nn.Identity() + + self.blocks = [ + nn.Sequential( + norm_builder(), + nn.Conv1d(channels, c_multiple * channels, kernel_size, dilation=dilation, + padding=(dilation * (kernel_size - 1)) // 2), + LambdaLayer(lambda x: x * kernel_size ** -0.5), + nn.GELU(), + nn.Conv1d(c_multiple * channels, channels, 1, dilation=dilation), + ) + for i in range(n) + ] + + self.blocks = nn.ModuleList(self.blocks) + self.dropout = dropout + + def forward(self, x): + nonpadding = (x.abs().sum(1) > 0).float()[:, None, :] + for b in self.blocks: + x_ = b(x) + if self.dropout > 0 and self.training: + x_ = F.dropout(x_, self.dropout, training=self.training) + x = x + x_ + x = x * nonpadding + return x + + +class ConvBlocks(nn.Module): + """Decodes the expanded phoneme encoding into spectrograms""" + + def __init__(self, hidden_size, out_dims, dilations, kernel_size, + norm_type='ln', layers_in_block=2, c_multiple=2, + dropout=0.0, ln_eps=1e-5, + 
init_weights=True, is_BTC=True, num_layers=None, post_net_kernel=3): + super(ConvBlocks, self).__init__() + self.is_BTC = is_BTC + if num_layers is not None: + dilations = [1] * num_layers + self.res_blocks = nn.Sequential( + *[ResidualBlock(hidden_size, kernel_size, d, + n=layers_in_block, norm_type=norm_type, c_multiple=c_multiple, + dropout=dropout, ln_eps=ln_eps) + for d in dilations], + ) + if norm_type == 'bn': + norm = nn.BatchNorm1d(hidden_size) + elif norm_type == 'in': + norm = nn.InstanceNorm1d(hidden_size, affine=True) + elif norm_type == 'gn': + norm = nn.GroupNorm(8, hidden_size) + elif norm_type == 'ln': + norm = LayerNorm(hidden_size, dim=1, eps=ln_eps) + self.last_norm = norm + self.post_net1 = nn.Conv1d(hidden_size, out_dims, kernel_size=post_net_kernel, + padding=post_net_kernel // 2) + if init_weights: + self.apply(init_weights_func) + + def forward(self, x, nonpadding=None): + """ + + :param x: [B, T, H] + :return: [B, T, H] + """ + if self.is_BTC: + x = x.transpose(1, 2) + if nonpadding is None: + nonpadding = (x.abs().sum(1) > 0).float()[:, None, :] + elif self.is_BTC: + nonpadding = nonpadding.transpose(1, 2) + x = self.res_blocks(x) * nonpadding + x = self.last_norm(x) * nonpadding + x = self.post_net1(x) * nonpadding + if self.is_BTC: + x = x.transpose(1, 2) + return x + + +class TextConvEncoder(ConvBlocks): + def __init__(self, dict_size, hidden_size, out_dims, dilations, kernel_size, + norm_type='ln', layers_in_block=2, c_multiple=2, + dropout=0.0, ln_eps=1e-5, init_weights=True, num_layers=None, post_net_kernel=3): + super().__init__(hidden_size, out_dims, dilations, kernel_size, + norm_type, layers_in_block, c_multiple, + dropout, ln_eps, init_weights, num_layers=num_layers, + post_net_kernel=post_net_kernel) + self.embed_tokens = Embedding(dict_size, hidden_size, 0) + self.embed_scale = math.sqrt(hidden_size) + + def forward(self, txt_tokens): + """ + + :param txt_tokens: [B, T] + :return: { + 'encoder_out': [B x T x C] + } + """ + x = self.embed_scale * self.embed_tokens(txt_tokens) + return super().forward(x) + + +class ConditionalConvBlocks(ConvBlocks): + def __init__(self, hidden_size, c_cond, c_out, dilations, kernel_size, + norm_type='ln', layers_in_block=2, c_multiple=2, + dropout=0.0, ln_eps=1e-5, init_weights=True, is_BTC=True, num_layers=None): + super().__init__(hidden_size, c_out, dilations, kernel_size, + norm_type, layers_in_block, c_multiple, + dropout, ln_eps, init_weights, is_BTC=False, num_layers=num_layers) + self.g_prenet = nn.Conv1d(c_cond, hidden_size, 3, padding=1) + self.is_BTC_ = is_BTC + if init_weights: + self.g_prenet.apply(init_weights_func) + + def forward(self, x, cond, nonpadding=None): + if self.is_BTC_: + x = x.transpose(1, 2) + cond = cond.transpose(1, 2) + if nonpadding is not None: + nonpadding = nonpadding.transpose(1, 2) + if nonpadding is None: + nonpadding = x.abs().sum(1)[:, None] + x = x + self.g_prenet(cond) + x = x * nonpadding + x = super(ConditionalConvBlocks, self).forward(x) # input needs to be BTC + if self.is_BTC_: + x = x.transpose(1, 2) + return x diff --git a/NeuralSeq/modules/commons/espnet_positional_embedding.py b/NeuralSeq/modules/commons/espnet_positional_embedding.py new file mode 100644 index 0000000000000000000000000000000000000000..74decb6ab300951490ae08a4b93041a0542b5bb7 --- /dev/null +++ b/NeuralSeq/modules/commons/espnet_positional_embedding.py @@ -0,0 +1,113 @@ +import math +import torch + + +class PositionalEncoding(torch.nn.Module): + """Positional encoding. 
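+ Uses the standard sinusoid table: even channels store sin(position / 10000^(2i/d_model)),
+ odd channels the matching cosine, and `forward` adds the table to the input after
+ scaling it by sqrt(d_model).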
+ Args: + d_model (int): Embedding dimension. + dropout_rate (float): Dropout rate. + max_len (int): Maximum input length. + reverse (bool): Whether to reverse the input position. + """ + + def __init__(self, d_model, dropout_rate, max_len=5000, reverse=False): + """Construct an PositionalEncoding object.""" + super(PositionalEncoding, self).__init__() + self.d_model = d_model + self.reverse = reverse + self.xscale = math.sqrt(self.d_model) + self.dropout = torch.nn.Dropout(p=dropout_rate) + self.pe = None + self.extend_pe(torch.tensor(0.0).expand(1, max_len)) + + def extend_pe(self, x): + """Reset the positional encodings.""" + if self.pe is not None: + if self.pe.size(1) >= x.size(1): + if self.pe.dtype != x.dtype or self.pe.device != x.device: + self.pe = self.pe.to(dtype=x.dtype, device=x.device) + return + pe = torch.zeros(x.size(1), self.d_model) + if self.reverse: + position = torch.arange( + x.size(1) - 1, -1, -1.0, dtype=torch.float32 + ).unsqueeze(1) + else: + position = torch.arange(0, x.size(1), dtype=torch.float32).unsqueeze(1) + div_term = torch.exp( + torch.arange(0, self.d_model, 2, dtype=torch.float32) + * -(math.log(10000.0) / self.d_model) + ) + pe[:, 0::2] = torch.sin(position * div_term) + pe[:, 1::2] = torch.cos(position * div_term) + pe = pe.unsqueeze(0) + self.pe = pe.to(device=x.device, dtype=x.dtype) + + def forward(self, x: torch.Tensor): + """Add positional encoding. + Args: + x (torch.Tensor): Input tensor (batch, time, `*`). + Returns: + torch.Tensor: Encoded tensor (batch, time, `*`). + """ + self.extend_pe(x) + x = x * self.xscale + self.pe[:, : x.size(1)] + return self.dropout(x) + + +class ScaledPositionalEncoding(PositionalEncoding): + """Scaled positional encoding module. + See Sec. 3.2 https://arxiv.org/abs/1809.08895 + Args: + d_model (int): Embedding dimension. + dropout_rate (float): Dropout rate. + max_len (int): Maximum input length. + """ + + def __init__(self, d_model, dropout_rate, max_len=5000): + """Initialize class.""" + super().__init__(d_model=d_model, dropout_rate=dropout_rate, max_len=max_len) + self.alpha = torch.nn.Parameter(torch.tensor(1.0)) + + def reset_parameters(self): + """Reset parameters.""" + self.alpha.data = torch.tensor(1.0) + + def forward(self, x): + """Add positional encoding. + Args: + x (torch.Tensor): Input tensor (batch, time, `*`). + Returns: + torch.Tensor: Encoded tensor (batch, time, `*`). + """ + self.extend_pe(x) + x = x + self.alpha * self.pe[:, : x.size(1)] + return self.dropout(x) + + +class RelPositionalEncoding(PositionalEncoding): + """Relative positional encoding module. + See : Appendix B in https://arxiv.org/abs/1901.02860 + Args: + d_model (int): Embedding dimension. + dropout_rate (float): Dropout rate. + max_len (int): Maximum input length. + """ + + def __init__(self, d_model, dropout_rate, max_len=5000): + """Initialize class.""" + super().__init__(d_model, dropout_rate, max_len, reverse=True) + + def forward(self, x): + """Compute positional encoding. + Args: + x (torch.Tensor): Input tensor (batch, time, `*`). + Returns: + torch.Tensor: Encoded tensor (batch, time, `*`). + torch.Tensor: Positional embedding tensor (1, time, `*`). 
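+ Note: this implementation returns a single tensor, the dropout of the scaled
+ input plus the dropout of the positional embedding, rather than the two
+ tensors listed above.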
+ """ + self.extend_pe(x) + x = x * self.xscale + pos_emb = self.pe[:, : x.size(1)] + return self.dropout(x) + self.dropout(pos_emb) \ No newline at end of file diff --git a/NeuralSeq/modules/commons/normalizing_flow/glow_modules.py b/NeuralSeq/modules/commons/normalizing_flow/glow_modules.py new file mode 100644 index 0000000000000000000000000000000000000000..1120e43133f7b76bf7ac52c120377ce57aae836c --- /dev/null +++ b/NeuralSeq/modules/commons/normalizing_flow/glow_modules.py @@ -0,0 +1,362 @@ +import scipy +from torch.nn import functional as F +import torch +from torch import nn +import numpy as np +from modules.commons.wavenet import WN +from modules.glow import utils + + +class ActNorm(nn.Module): + def __init__(self, channels, ddi=False, **kwargs): + super().__init__() + self.channels = channels + self.initialized = not ddi + + self.logs = nn.Parameter(torch.zeros(1, channels, 1)) + self.bias = nn.Parameter(torch.zeros(1, channels, 1)) + + def forward(self, x, x_mask=None, reverse=False, **kwargs): + if x_mask is None: + x_mask = torch.ones(x.size(0), 1, x.size(2)).to(device=x.device, dtype=x.dtype) + x_len = torch.sum(x_mask, [1, 2]) + if not self.initialized: + self.initialize(x, x_mask) + self.initialized = True + + if reverse: + z = (x - self.bias) * torch.exp(-self.logs) * x_mask + logdet = torch.sum(-self.logs) * x_len + else: + z = (self.bias + torch.exp(self.logs) * x) * x_mask + logdet = torch.sum(self.logs) * x_len # [b] + return z, logdet + + def store_inverse(self): + pass + + def set_ddi(self, ddi): + self.initialized = not ddi + + def initialize(self, x, x_mask): + with torch.no_grad(): + denom = torch.sum(x_mask, [0, 2]) + m = torch.sum(x * x_mask, [0, 2]) / denom + m_sq = torch.sum(x * x * x_mask, [0, 2]) / denom + v = m_sq - (m ** 2) + logs = 0.5 * torch.log(torch.clamp_min(v, 1e-6)) + + bias_init = (-m * torch.exp(-logs)).view(*self.bias.shape).to(dtype=self.bias.dtype) + logs_init = (-logs).view(*self.logs.shape).to(dtype=self.logs.dtype) + + self.bias.data.copy_(bias_init) + self.logs.data.copy_(logs_init) + + +class InvConvNear(nn.Module): + def __init__(self, channels, n_split=4, no_jacobian=False, lu=True, n_sqz=2, **kwargs): + super().__init__() + assert (n_split % 2 == 0) + self.channels = channels + self.n_split = n_split + self.n_sqz = n_sqz + self.no_jacobian = no_jacobian + + w_init = torch.qr(torch.FloatTensor(self.n_split, self.n_split).normal_())[0] + if torch.det(w_init) < 0: + w_init[:, 0] = -1 * w_init[:, 0] + self.lu = lu + if lu: + # LU decomposition can slightly speed up the inverse + np_p, np_l, np_u = scipy.linalg.lu(w_init) + np_s = np.diag(np_u) + np_sign_s = np.sign(np_s) + np_log_s = np.log(np.abs(np_s)) + np_u = np.triu(np_u, k=1) + l_mask = np.tril(np.ones(w_init.shape, dtype=float), -1) + eye = np.eye(*w_init.shape, dtype=float) + + self.register_buffer('p', torch.Tensor(np_p.astype(float))) + self.register_buffer('sign_s', torch.Tensor(np_sign_s.astype(float))) + self.l = nn.Parameter(torch.Tensor(np_l.astype(float)), requires_grad=True) + self.log_s = nn.Parameter(torch.Tensor(np_log_s.astype(float)), requires_grad=True) + self.u = nn.Parameter(torch.Tensor(np_u.astype(float)), requires_grad=True) + self.register_buffer('l_mask', torch.Tensor(l_mask)) + self.register_buffer('eye', torch.Tensor(eye)) + else: + self.weight = nn.Parameter(w_init) + + def forward(self, x, x_mask=None, reverse=False, **kwargs): + b, c, t = x.size() + assert (c % self.n_split == 0) + if x_mask is None: + x_mask = 1 + x_len = torch.ones((b,), dtype=x.dtype, 
device=x.device) * t + else: + x_len = torch.sum(x_mask, [1, 2]) + + x = x.view(b, self.n_sqz, c // self.n_split, self.n_split // self.n_sqz, t) + x = x.permute(0, 1, 3, 2, 4).contiguous().view(b, self.n_split, c // self.n_split, t) + + if self.lu: + self.weight, log_s = self._get_weight() + logdet = log_s.sum() + logdet = logdet * (c / self.n_split) * x_len + else: + logdet = torch.logdet(self.weight) * (c / self.n_split) * x_len # [b] + + if reverse: + if hasattr(self, "weight_inv"): + weight = self.weight_inv + else: + weight = torch.inverse(self.weight.float()).to(dtype=self.weight.dtype) + logdet = -logdet + else: + weight = self.weight + if self.no_jacobian: + logdet = 0 + + weight = weight.view(self.n_split, self.n_split, 1, 1) + z = F.conv2d(x, weight) + + z = z.view(b, self.n_sqz, self.n_split // self.n_sqz, c // self.n_split, t) + z = z.permute(0, 1, 3, 2, 4).contiguous().view(b, c, t) * x_mask + return z, logdet + + def _get_weight(self): + l, log_s, u = self.l, self.log_s, self.u + l = l * self.l_mask + self.eye + u = u * self.l_mask.transpose(0, 1).contiguous() + torch.diag(self.sign_s * torch.exp(log_s)) + weight = torch.matmul(self.p, torch.matmul(l, u)) + return weight, log_s + + def store_inverse(self): + weight, _ = self._get_weight() + self.weight_inv = torch.inverse(weight.float()).to(next(self.parameters()).device) + + +class InvConv(nn.Module): + def __init__(self, channels, no_jacobian=False, lu=True, **kwargs): + super().__init__() + w_shape = [channels, channels] + w_init = np.linalg.qr(np.random.randn(*w_shape))[0].astype(float) + LU_decomposed = lu + if not LU_decomposed: + # Sample a random orthogonal matrix: + self.register_parameter("weight", nn.Parameter(torch.Tensor(w_init))) + else: + np_p, np_l, np_u = scipy.linalg.lu(w_init) + np_s = np.diag(np_u) + np_sign_s = np.sign(np_s) + np_log_s = np.log(np.abs(np_s)) + np_u = np.triu(np_u, k=1) + l_mask = np.tril(np.ones(w_shape, dtype=float), -1) + eye = np.eye(*w_shape, dtype=float) + + self.register_buffer('p', torch.Tensor(np_p.astype(float))) + self.register_buffer('sign_s', torch.Tensor(np_sign_s.astype(float))) + self.l = nn.Parameter(torch.Tensor(np_l.astype(float))) + self.log_s = nn.Parameter(torch.Tensor(np_log_s.astype(float))) + self.u = nn.Parameter(torch.Tensor(np_u.astype(float))) + self.l_mask = torch.Tensor(l_mask) + self.eye = torch.Tensor(eye) + self.w_shape = w_shape + self.LU = LU_decomposed + self.weight = None + + def get_weight(self, device, reverse): + w_shape = self.w_shape + self.p = self.p.to(device) + self.sign_s = self.sign_s.to(device) + self.l_mask = self.l_mask.to(device) + self.eye = self.eye.to(device) + l = self.l * self.l_mask + self.eye + u = self.u * self.l_mask.transpose(0, 1).contiguous() + torch.diag(self.sign_s * torch.exp(self.log_s)) + dlogdet = self.log_s.sum() + if not reverse: + w = torch.matmul(self.p, torch.matmul(l, u)) + else: + l = torch.inverse(l.double()).float() + u = torch.inverse(u.double()).float() + w = torch.matmul(u, torch.matmul(l, self.p.inverse())) + return w.view(w_shape[0], w_shape[1], 1), dlogdet + + def forward(self, x, x_mask=None, reverse=False, **kwargs): + """ + log-det = log|abs(|W|)| * pixels + """ + b, c, t = x.size() + if x_mask is None: + x_len = torch.ones((b,), dtype=x.dtype, device=x.device) * t + else: + x_len = torch.sum(x_mask, [1, 2]) + logdet = 0 + if not reverse: + weight, dlogdet = self.get_weight(x.device, reverse) + z = F.conv1d(x, weight) + if logdet is not None: + logdet = logdet + dlogdet * x_len + return z, logdet + 
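+ # reverse pass: prefer the inverse weight cached by store_inverse(); otherwise recompute it.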
else: + if self.weight is None: + weight, dlogdet = self.get_weight(x.device, reverse) + else: + weight, dlogdet = self.weight, self.dlogdet + z = F.conv1d(x, weight) + if logdet is not None: + logdet = logdet - dlogdet * x_len + return z, logdet + + def store_inverse(self): + self.weight, self.dlogdet = self.get_weight('cuda', reverse=True) + + +class CouplingBlock(nn.Module): + def __init__(self, in_channels, hidden_channels, kernel_size, dilation_rate, n_layers, + gin_channels=0, p_dropout=0, sigmoid_scale=False, wn=None): + super().__init__() + self.in_channels = in_channels + self.hidden_channels = hidden_channels + self.kernel_size = kernel_size + self.dilation_rate = dilation_rate + self.n_layers = n_layers + self.gin_channels = gin_channels + self.p_dropout = p_dropout + self.sigmoid_scale = sigmoid_scale + + start = torch.nn.Conv1d(in_channels // 2, hidden_channels, 1) + start = torch.nn.utils.weight_norm(start) + self.start = start + # Initializing last layer to 0 makes the affine coupling layers + # do nothing at first. This helps with training stability + end = torch.nn.Conv1d(hidden_channels, in_channels, 1) + end.weight.data.zero_() + end.bias.data.zero_() + self.end = end + self.wn = WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels, p_dropout) + if wn is not None: + self.wn.in_layers = wn.in_layers + self.wn.res_skip_layers = wn.res_skip_layers + + def forward(self, x, x_mask=None, reverse=False, g=None, **kwargs): + if x_mask is None: + x_mask = 1 + x_0, x_1 = x[:, :self.in_channels // 2], x[:, self.in_channels // 2:] + + x = self.start(x_0) * x_mask + x = self.wn(x, x_mask, g) + out = self.end(x) + + z_0 = x_0 + m = out[:, :self.in_channels // 2, :] + logs = out[:, self.in_channels // 2:, :] + if self.sigmoid_scale: + logs = torch.log(1e-6 + torch.sigmoid(logs + 2)) + if reverse: + z_1 = (x_1 - m) * torch.exp(-logs) * x_mask + logdet = torch.sum(-logs * x_mask, [1, 2]) + else: + z_1 = (m + torch.exp(logs) * x_1) * x_mask + logdet = torch.sum(logs * x_mask, [1, 2]) + z = torch.cat([z_0, z_1], 1) + return z, logdet + + def store_inverse(self): + self.wn.remove_weight_norm() + + +class Glow(nn.Module): + def __init__(self, + in_channels, + hidden_channels, + kernel_size, + dilation_rate, + n_blocks, + n_layers, + p_dropout=0., + n_split=4, + n_sqz=2, + sigmoid_scale=False, + gin_channels=0, + inv_conv_type='near', + share_cond_layers=False, + share_wn_layers=0, + ): + super().__init__() + + self.in_channels = in_channels + self.hidden_channels = hidden_channels + self.kernel_size = kernel_size + self.dilation_rate = dilation_rate + self.n_blocks = n_blocks + self.n_layers = n_layers + self.p_dropout = p_dropout + self.n_split = n_split + self.n_sqz = n_sqz + self.sigmoid_scale = sigmoid_scale + self.gin_channels = gin_channels + self.share_cond_layers = share_cond_layers + if gin_channels != 0 and share_cond_layers: + cond_layer = torch.nn.Conv1d(gin_channels * n_sqz, 2 * hidden_channels * n_layers, 1) + self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight') + wn = None + self.flows = nn.ModuleList() + for b in range(n_blocks): + self.flows.append(ActNorm(channels=in_channels * n_sqz)) + if inv_conv_type == 'near': + self.flows.append(InvConvNear(channels=in_channels * n_sqz, n_split=n_split, n_sqz=n_sqz)) + if inv_conv_type == 'invconv': + self.flows.append(InvConv(channels=in_channels * n_sqz)) + if share_wn_layers > 0: + if b % share_wn_layers == 0: + wn = WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels * n_sqz, 
+ p_dropout, share_cond_layers) + self.flows.append( + CouplingBlock( + in_channels * n_sqz, + hidden_channels, + kernel_size=kernel_size, + dilation_rate=dilation_rate, + n_layers=n_layers, + gin_channels=gin_channels * n_sqz, + p_dropout=p_dropout, + sigmoid_scale=sigmoid_scale, + wn=wn + )) + + def forward(self, x, x_mask=None, g=None, reverse=False, return_hiddens=False): + logdet_tot = 0 + if not reverse: + flows = self.flows + else: + flows = reversed(self.flows) + if return_hiddens: + hs = [] + if self.n_sqz > 1: + x, x_mask_ = utils.squeeze(x, x_mask, self.n_sqz) + if g is not None: + g, _ = utils.squeeze(g, x_mask, self.n_sqz) + x_mask = x_mask_ + if self.share_cond_layers and g is not None: + g = self.cond_layer(g) + for f in flows: + x, logdet = f(x, x_mask, g=g, reverse=reverse) + if return_hiddens: + hs.append(x) + logdet_tot += logdet + if self.n_sqz > 1: + x, x_mask = utils.unsqueeze(x, x_mask, self.n_sqz) + if return_hiddens: + return x, logdet_tot, hs + return x, logdet_tot + + def store_inverse(self): + def remove_weight_norm(m): + try: + nn.utils.remove_weight_norm(m) + except ValueError: # this module didn't have weight norm + return + + self.apply(remove_weight_norm) + for f in self.flows: + f.store_inverse() diff --git a/NeuralSeq/modules/commons/normalizing_flow/res_flow.py b/NeuralSeq/modules/commons/normalizing_flow/res_flow.py new file mode 100644 index 0000000000000000000000000000000000000000..d0d13285704543ec28fe37d82346011240bdcaf8 --- /dev/null +++ b/NeuralSeq/modules/commons/normalizing_flow/res_flow.py @@ -0,0 +1,61 @@ +import torch +from torch import nn +from modules.commons.conv import ConditionalConvBlocks +from modules.commons.wavenet import WN + + +class FlipLayer(nn.Module): + def forward(self, x, nonpadding, cond=None, reverse=False): + x = torch.flip(x, [1]) + return x + + +class CouplingLayer(nn.Module): + def __init__(self, c_in, hidden_size, kernel_size, n_layers, p_dropout=0, c_in_g=0, nn_type='wn'): + super().__init__() + self.channels = c_in + self.hidden_size = hidden_size + self.kernel_size = kernel_size + self.n_layers = n_layers + self.c_half = c_in // 2 + + self.pre = nn.Conv1d(self.c_half, hidden_size, 1) + if nn_type == 'wn': + self.enc = WN(hidden_size, kernel_size, 1, n_layers, p_dropout=p_dropout, + c_cond=c_in_g) + elif nn_type == 'conv': + self.enc = ConditionalConvBlocks( + hidden_size, c_in_g, hidden_size, None, kernel_size, + layers_in_block=1, is_BTC=False, num_layers=n_layers) + self.post = nn.Conv1d(hidden_size, self.c_half, 1) + + def forward(self, x, nonpadding, cond=None, reverse=False): + x0, x1 = x[:, :self.c_half], x[:, self.c_half:] + x_ = self.pre(x0) * nonpadding + x_ = self.enc(x_, nonpadding=nonpadding, cond=cond) + m = self.post(x_) + x1 = m + x1 if not reverse else x1 - m + x = torch.cat([x0, x1], 1) + return x * nonpadding + + +class ResFlow(nn.Module): + def __init__(self, + c_in, + hidden_size, + kernel_size, + n_flow_layers, + n_flow_steps=4, + c_cond=0, + nn_type='wn'): + super().__init__() + self.flows = nn.ModuleList() + for i in range(n_flow_steps): + self.flows.append( + CouplingLayer(c_in, hidden_size, kernel_size, n_flow_layers, c_in_g=c_cond, nn_type=nn_type)) + self.flows.append(FlipLayer()) + + def forward(self, x, nonpadding, cond=None, reverse=False): + for flow in (self.flows if not reverse else reversed(self.flows)): + x = flow(x, nonpadding, cond=cond, reverse=reverse) + return x diff --git a/NeuralSeq/modules/commons/normalizing_flow/utils.py 
b/NeuralSeq/modules/commons/normalizing_flow/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..7eb56ec514bff822ba1a19a6474207ed82492410 --- /dev/null +++ b/NeuralSeq/modules/commons/normalizing_flow/utils.py @@ -0,0 +1,29 @@ +import torch + + +def squeeze(x, x_mask=None, n_sqz=2): + b, c, t = x.size() + + t = (t // n_sqz) * n_sqz + x = x[:, :, :t] + x_sqz = x.view(b, c, t // n_sqz, n_sqz) + x_sqz = x_sqz.permute(0, 3, 1, 2).contiguous().view(b, c * n_sqz, t // n_sqz) + + if x_mask is not None: + x_mask = x_mask[:, :, n_sqz - 1::n_sqz] + else: + x_mask = torch.ones(b, 1, t // n_sqz).to(device=x.device, dtype=x.dtype) + return x_sqz * x_mask, x_mask + + +def unsqueeze(x, x_mask=None, n_sqz=2): + b, c, t = x.size() + + x_unsqz = x.view(b, n_sqz, c // n_sqz, t) + x_unsqz = x_unsqz.permute(0, 2, 3, 1).contiguous().view(b, c // n_sqz, t * n_sqz) + + if x_mask is not None: + x_mask = x_mask.unsqueeze(-1).repeat(1, 1, 1, n_sqz).view(b, 1, t * n_sqz) + else: + x_mask = torch.ones(b, 1, t * n_sqz).to(device=x.device, dtype=x.dtype) + return x_unsqz * x_mask, x_mask diff --git a/NeuralSeq/modules/commons/rel_transformer.py b/NeuralSeq/modules/commons/rel_transformer.py new file mode 100644 index 0000000000000000000000000000000000000000..ed69e587f9813fc1214dc034f8cabf238e362b61 --- /dev/null +++ b/NeuralSeq/modules/commons/rel_transformer.py @@ -0,0 +1,611 @@ +import math +import torch +from torch import nn +from torch.nn import functional as F +from utils.hparams import hparams +from modules.commons.common_layers import Embedding +from utils.tts_utils import group_hidden_by_segs, expand_word2ph + +import transformers + +def convert_pad_shape(pad_shape): + l = pad_shape[::-1] + pad_shape = [item for sublist in l for item in sublist] + return pad_shape + + +def shift_1d(x): + x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1] + return x + + +def sequence_mask(length, max_length=None): + if max_length is None: + max_length = length.max() + x = torch.arange(max_length, dtype=length.dtype, device=length.device) + return x.unsqueeze(0) < length.unsqueeze(1) + + +class Encoder(nn.Module): + def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., + window_size=None, block_length=None, pre_ln=False, **kwargs): + super().__init__() + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.window_size = window_size + self.block_length = block_length + self.pre_ln = pre_ln + + self.drop = nn.Dropout(p_dropout) + self.attn_layers = nn.ModuleList() + self.norm_layers_1 = nn.ModuleList() + self.ffn_layers = nn.ModuleList() + self.norm_layers_2 = nn.ModuleList() + for i in range(self.n_layers): + self.attn_layers.append( + MultiHeadAttention(hidden_channels, hidden_channels, n_heads, window_size=window_size, + p_dropout=p_dropout, block_length=block_length)) + self.norm_layers_1.append(LayerNorm(hidden_channels)) + self.ffn_layers.append( + FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout)) + self.norm_layers_2.append(LayerNorm(hidden_channels)) + if pre_ln: + self.last_ln = LayerNorm(hidden_channels) + + def forward(self, x, x_mask): + attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) + for i in range(self.n_layers): + x = x * x_mask + x_ = x + if self.pre_ln: + x = self.norm_layers_1[i](x) + y = self.attn_layers[i](x, x, attn_mask) + y = 
self.drop(y) + x = x_ + y + if not self.pre_ln: + x = self.norm_layers_1[i](x) + + x_ = x + if self.pre_ln: + x = self.norm_layers_2[i](x) + y = self.ffn_layers[i](x, x_mask) + y = self.drop(y) + x = x_ + y + if not self.pre_ln: + x = self.norm_layers_2[i](x) + if self.pre_ln: + x = self.last_ln(x) + x = x * x_mask + return x + + +class MultiHeadAttention(nn.Module): + def __init__(self, channels, out_channels, n_heads, window_size=None, heads_share=True, p_dropout=0., + block_length=None, proximal_bias=False, proximal_init=False): + super().__init__() + assert channels % n_heads == 0 + + self.channels = channels + self.out_channels = out_channels + self.n_heads = n_heads + self.window_size = window_size + self.heads_share = heads_share + self.block_length = block_length + self.proximal_bias = proximal_bias + self.p_dropout = p_dropout + self.attn = None + + self.k_channels = channels // n_heads + self.conv_q = nn.Conv1d(channels, channels, 1) + self.conv_k = nn.Conv1d(channels, channels, 1) + self.conv_v = nn.Conv1d(channels, channels, 1) + if window_size is not None: + n_heads_rel = 1 if heads_share else n_heads + rel_stddev = self.k_channels ** -0.5 + self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) + self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) + self.conv_o = nn.Conv1d(channels, out_channels, 1) + self.drop = nn.Dropout(p_dropout) + + nn.init.xavier_uniform_(self.conv_q.weight) + nn.init.xavier_uniform_(self.conv_k.weight) + if proximal_init: + self.conv_k.weight.data.copy_(self.conv_q.weight.data) + self.conv_k.bias.data.copy_(self.conv_q.bias.data) + nn.init.xavier_uniform_(self.conv_v.weight) + + def forward(self, x, c, attn_mask=None): + q = self.conv_q(x) + k = self.conv_k(c) + v = self.conv_v(c) + + x, self.attn = self.attention(q, k, v, mask=attn_mask) + + x = self.conv_o(x) + return x + + def attention(self, query, key, value, mask=None): + # reshape [b, d, t] -> [b, n_h, t, d_k] + b, d, t_s, t_t = (*key.size(), query.size(2)) + query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3) + key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) + value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) + + scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(self.k_channels) + if self.window_size is not None: + assert t_s == t_t, "Relative attention is only available for self-attention." + key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s) + rel_logits = self._matmul_with_relative_keys(query, key_relative_embeddings) + rel_logits = self._relative_position_to_absolute_position(rel_logits) + scores_local = rel_logits / math.sqrt(self.k_channels) + scores = scores + scores_local + if self.proximal_bias: + assert t_s == t_t, "Proximal bias is only available for self-attention." 
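+ # proximal bias penalises attention to distant positions with -log(1 + |i - j|).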
+ scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype) + if mask is not None: + scores = scores.masked_fill(mask == 0, -1e4) + if self.block_length is not None: + block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length) + scores = scores * block_mask + -1e4 * (1 - block_mask) + p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s] + p_attn = self.drop(p_attn) + output = torch.matmul(p_attn, value) + if self.window_size is not None: + relative_weights = self._absolute_position_to_relative_position(p_attn) + value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s) + output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings) + output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t] + return output, p_attn + + def _matmul_with_relative_values(self, x, y): + """ + x: [b, h, l, m] + y: [h or 1, m, d] + ret: [b, h, l, d] + """ + ret = torch.matmul(x, y.unsqueeze(0)) + return ret + + def _matmul_with_relative_keys(self, x, y): + """ + x: [b, h, l, d] + y: [h or 1, m, d] + ret: [b, h, l, m] + """ + ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) + return ret + + def _get_relative_embeddings(self, relative_embeddings, length): + max_relative_position = 2 * self.window_size + 1 + # Pad first before slice to avoid using cond ops. + pad_length = max(length - (self.window_size + 1), 0) + slice_start_position = max((self.window_size + 1) - length, 0) + slice_end_position = slice_start_position + 2 * length - 1 + if pad_length > 0: + padded_relative_embeddings = F.pad( + relative_embeddings, + convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]])) + else: + padded_relative_embeddings = relative_embeddings + used_relative_embeddings = padded_relative_embeddings[:, slice_start_position:slice_end_position] + return used_relative_embeddings + + def _relative_position_to_absolute_position(self, x): + """ + x: [b, h, l, 2*l-1] + ret: [b, h, l, l] + """ + batch, heads, length, _ = x.size() + # Concat columns of pad to shift from relative to absolute indexing. + x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]])) + + # Concat extra elements so to add up to shape (len+1, 2*len-1). + x_flat = x.view([batch, heads, length * 2 * length]) + x_flat = F.pad(x_flat, convert_pad_shape([[0, 0], [0, 0], [0, length - 1]])) + + # Reshape and slice out the padded elements. + x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[:, :, :length, length - 1:] + return x_final + + def _absolute_position_to_relative_position(self, x): + """ + x: [b, h, l, l] + ret: [b, h, l, 2*l-1] + """ + batch, heads, length, _ = x.size() + # padd along column + x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]])) + x_flat = x.view([batch, heads, length ** 2 + length * (length - 1)]) + # add 0's in the beginning that will skew the elements after reshape + x_flat = F.pad(x_flat, convert_pad_shape([[0, 0], [0, 0], [length, 0]])) + x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:] + return x_final + + def _attention_bias_proximal(self, length): + """Bias for self-attention to encourage attention to close positions. + Args: + length: an integer scalar. 
+ Returns: + a Tensor with shape [1, 1, length, length] + """ + r = torch.arange(length, dtype=torch.float32) + diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) + return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0) + + +class FFN(nn.Module): + def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None): + super().__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.filter_channels = filter_channels + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.activation = activation + + self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size // 2) + self.conv_2 = nn.Conv1d(filter_channels, out_channels, 1) + self.drop = nn.Dropout(p_dropout) + + def forward(self, x, x_mask): + x = self.conv_1(x * x_mask) + if self.activation == "gelu": + x = x * torch.sigmoid(1.702 * x) + else: + x = torch.relu(x) + x = self.drop(x) + x = self.conv_2(x * x_mask) + return x * x_mask + + +class LayerNorm(nn.Module): + def __init__(self, channels, eps=1e-4): + super().__init__() + self.channels = channels + self.eps = eps + + self.gamma = nn.Parameter(torch.ones(channels)) + self.beta = nn.Parameter(torch.zeros(channels)) + + def forward(self, x): + n_dims = len(x.shape) + mean = torch.mean(x, 1, keepdim=True) + variance = torch.mean((x - mean) ** 2, 1, keepdim=True) + + x = (x - mean) * torch.rsqrt(variance + self.eps) + + shape = [1, -1] + [1] * (n_dims - 2) + x = x * self.gamma.view(*shape) + self.beta.view(*shape) + return x + + +class ConvReluNorm(nn.Module): + def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout): + super().__init__() + self.in_channels = in_channels + self.hidden_channels = hidden_channels + self.out_channels = out_channels + self.kernel_size = kernel_size + self.n_layers = n_layers + self.p_dropout = p_dropout + assert n_layers > 1, "Number of layers should be larger than 1."
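+ # a stack of (conv -> LayerNorm -> ReLU -> dropout) blocks with a zero-initialised residual projection.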
+ + self.conv_layers = nn.ModuleList() + self.norm_layers = nn.ModuleList() + self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size // 2)) + self.norm_layers.append(LayerNorm(hidden_channels)) + self.relu_drop = nn.Sequential( + nn.ReLU(), + nn.Dropout(p_dropout)) + for _ in range(n_layers - 1): + self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size // 2)) + self.norm_layers.append(LayerNorm(hidden_channels)) + self.proj = nn.Conv1d(hidden_channels, out_channels, 1) + self.proj.weight.data.zero_() + self.proj.bias.data.zero_() + + def forward(self, x, x_mask): + x_org = x + for i in range(self.n_layers): + x = self.conv_layers[i](x * x_mask) + x = self.norm_layers[i](x) + x = self.relu_drop(x) + x = x_org + self.proj(x) + return x * x_mask + + +class RelTransformerEncoder(nn.Module): + def __init__(self, + n_vocab, + out_channels, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout=0.0, + window_size=4, + block_length=None, + prenet=True, + pre_ln=True, + ): + + super().__init__() + + self.n_vocab = n_vocab + self.out_channels = out_channels + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.window_size = window_size + self.block_length = block_length + self.prenet = prenet + if n_vocab > 0: + self.emb = Embedding(n_vocab, hidden_channels, padding_idx=0) + + if prenet: + self.pre = ConvReluNorm(hidden_channels, hidden_channels, hidden_channels, + kernel_size=5, n_layers=3, p_dropout=0) + self.encoder = Encoder( + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout, + window_size=window_size, + block_length=block_length, + pre_ln=pre_ln, + ) + + def forward(self, x, x_mask=None): + if self.n_vocab > 0: + x_lengths = (x > 0).long().sum(-1) + x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h] + else: + x_lengths = (x.abs().sum(-1) > 0).long().sum(-1) + x = torch.transpose(x, 1, -1) # [b, h, t] + x_mask = torch.unsqueeze(sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) + + if self.prenet: + x = self.pre(x, x_mask) + x = self.encoder(x, x_mask) + return x.transpose(1, 2) + + +class Pooler(nn.Module): + """ + Parameter-free poolers to get the sentence embedding + 'cls': [CLS] representation with BERT/RoBERTa's MLP pooler. + 'cls_before_pooler': [CLS] representation without the original MLP pooler. + 'avg': average of the last layers' hidden states at each token. + 'avg_top2': average of the last two layers. + 'avg_first_last': average of the first and the last layers. 
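+ Below, BERTRelTransformerEncoder builds this pooler with 'avg', i.e. an
+ attention-mask-weighted mean over the last hidden states.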
+ """ + def __init__(self, pooler_type): + super().__init__() + self.pooler_type = pooler_type + assert self.pooler_type in ["cls", "cls_before_pooler", "avg", "avg_top2", "avg_first_last"], "unrecognized pooling type %s" % self.pooler_type + + def forward(self, attention_mask, outputs): + last_hidden = outputs.last_hidden_state + pooler_output = outputs.pooler_output + hidden_states = outputs.hidden_states + + if self.pooler_type in ['cls_before_pooler', 'cls']: + return last_hidden[:, 0] + elif self.pooler_type == "avg": + return ((last_hidden * attention_mask.unsqueeze(-1)).sum(1) / attention_mask.sum(-1).unsqueeze(-1)) + elif self.pooler_type == "avg_first_last": + first_hidden = hidden_states[0] + last_hidden = hidden_states[-1] + pooled_result = ((first_hidden + last_hidden) / 2.0 * attention_mask.unsqueeze(-1)).sum(1) / attention_mask.sum(-1).unsqueeze(-1) + return pooled_result + elif self.pooler_type == "avg_top2": + second_last_hidden = hidden_states[-2] + last_hidden = hidden_states[-1] + pooled_result = ((last_hidden + second_last_hidden) / 2.0 * attention_mask.unsqueeze(-1)).sum(1) / attention_mask.sum(-1).unsqueeze(-1) + return pooled_result + else: + raise NotImplementedError + + +class Similarity(nn.Module): + """ + Dot product or cosine similarity + """ + + def __init__(self, temp): + super().__init__() + self.temp = temp + self.cos = nn.CosineSimilarity(dim=-1) + self.record = None + self.pos_avg = 0.0 + self.neg_avg = 0.0 + + def forward(self, x, y): + sim = self.cos(x, y) + self.record = sim.detach() # [64,64] + min_size = min(self.record.shape[0], self.record.shape[1]) # 64 + num_item = self.record.shape[0] * self.record.shape[1] # 4096 + self.pos_avg = self.record.diag().sum() / min_size + if num_item - min_size == 0: + self.neg_avg = (self.record.sum() - self.record.diag().sum()) / 1 + return sim / self.temp + if torch.any(torch.isnan(self.record)).item() is True: + print("we got self.record has nan when compute neg_avg") + if torch.any(torch.isnan(self.record.diag())).item() is True: + print("we got self.record.diag() has nan when compute neg_avg") + self.neg_avg = (self.record.sum() - self.record.diag().sum()) / (num_item - min_size) + + return sim / self.temp + + +class BertPredictionHeadTransform(nn.Module): + def __init__(self, hidden_size): + super().__init__() + self.dense = nn.Linear(hidden_size, hidden_size) + self.transform_act_fn = F.gelu + self.LayerNorm = nn.LayerNorm(hidden_size, eps=1e-12) + + def forward(self, hidden_states): + hidden_states = self.dense(hidden_states) + hidden_states = self.transform_act_fn(hidden_states) + hidden_states = self.LayerNorm(hidden_states) + return hidden_states + + +class BertLMPredictionHead(nn.Module): + def __init__(self, hid_dim, out_dim): + super().__init__() + self.transform = BertPredictionHeadTransform(hid_dim) + self.decoder = nn.Linear(hid_dim, out_dim, bias=False) + self.bias = nn.Parameter(torch.zeros(out_dim)) + self.decoder.bias = self.bias + + def forward(self, hidden_states): + hidden_states = self.transform(hidden_states) + hidden_states = self.decoder(hidden_states) + return hidden_states + + +# V2_2 +# change add to concat. 
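+# (the encoder below runs half of the relative-position layers, expands word-level BERT
+# features to phoneme level, fuses them by channel-wise concatenation, then runs the rest)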
+# now support finetune BERT +# grad_bert=0.1 & trainable_block_idx=0 +class BERTRelTransformerEncoder(nn.Module): + def __init__(self, + n_vocab, + out_channels, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout=0.0, + window_size=4, + block_length=None, + prenet=True, + pre_ln=True, + ): + + super().__init__() + + self.n_vocab = n_vocab + self.out_channels = out_channels + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.window_size = window_size + self.block_length = block_length + self.prenet = prenet + if n_vocab > 0: + self.emb = Embedding(n_vocab, hidden_channels, padding_idx=0) + + if prenet: + self.pre = ConvReluNorm(hidden_channels, hidden_channels, hidden_channels, + kernel_size=5, n_layers=3, p_dropout=0) + self.encoder1 = Encoder( + hidden_channels, + filter_channels, + n_heads, + n_layers//2, + kernel_size, + p_dropout, + window_size=window_size, + block_length=block_length, + pre_ln=pre_ln, + ) + + self.encoder2 = Encoder( + hidden_channels, + filter_channels, + n_heads, + n_layers - n_layers//2, + kernel_size, + p_dropout, + window_size=window_size, + block_length=block_length, + pre_ln=pre_ln, + ) + + if hparams['ds_name'] in ['ljspeech', 'libritts', 'librispeech']: + model_name = 'bert-base-uncased' + elif hparams['ds_name'] in ['biaobei', 'wenetspeech']: + model_name = 'bert-base-chinese' + else: + raise NotImplementedError() + + self.tokenizer = transformers.AutoTokenizer.from_pretrained(model_name) + config = transformers.AutoConfig.from_pretrained(model_name) + if hparams.get("load_bert_from_pretrained", True): + print("Load BERT from pretrained model ...") + self.bert = transformers.AutoModel.from_pretrained(model_name,config=config) + trainable_start_block = hparams.get("bert_trainable_start_block", 0) + else: + print("Initialize BERT from scratch!") + self.bert = transformers.BertModel(config=config) + trainable_start_block = 0 + + for k, v in self.bert.named_parameters(): + if 'embeddings' in k: + v.requires_grad = False + elif 'encoder.layer' in k: + block_idx = int(k.split(".")[2]) + if block_idx < trainable_start_block: + v.requires_grad = False + else: + v.requires_grad = True + elif 'cls' in k: + v.requires_grad = True + else: + print("Unhandled key: {}, set to requires_grad...".format(k)) + v.requires_grad = True + + self.bert_combine = nn.Sequential(*[ + nn.Conv1d(768 + hidden_channels, hidden_channels, 3, 1, 1), + nn.ReLU(), + ]) + self.pooler = Pooler("avg") + self.sim = Similarity(temp=0.05) + + def forward(self, x, x_mask=None, bert_feats=None, ph2word=None, **kwargs): + if self.n_vocab > 0: + x_lengths = (x > 0).long().sum(-1) + x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h] + else: + x_lengths = (x.abs().sum(-1) > 0).long().sum(-1) + x = torch.transpose(x, 1, -1) # [b, h, t] + x_mask = torch.unsqueeze(sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) + + if self.prenet: + x = self.pre(x, x_mask) + x = self.encoder1(x, x_mask) + bert_outputs = self.bert(bert_feats['bert_input_ids'], + attention_mask=bert_feats['bert_attention_mask'], + token_type_ids=bert_feats['bert_token_type_ids'], + output_hidden_states=True) + bert_num_blocks = hparams.get("bert_num_blocks", 12) # total 1+12blocks in bert + bert_embedding = bert_outputs['hidden_states'][bert_num_blocks] + # bert_embedding = bert_outputs['last_hidden_state'] + grad_bert = 
hparams.get("grad_bert", 0.1) + bert_embedding = bert_embedding.detach() * (1-grad_bert) + bert_embedding * grad_bert + bert_word_embedding, _ = group_hidden_by_segs(bert_embedding, bert_feats['bert_token2word'], bert_feats['bert_token2word'].max().item()) + bert_ph_embedding = expand_word2ph(bert_word_embedding, ph2word) + bert_ph_embedding = bert_ph_embedding.transpose(1,2) + x = torch.cat([x, bert_ph_embedding], dim=1) + x = self.bert_combine(x) + x = self.encoder2(x, x_mask) + return x.transpose(1, 2) + + diff --git a/NeuralSeq/modules/commons/ssim.py b/NeuralSeq/modules/commons/ssim.py new file mode 100644 index 0000000000000000000000000000000000000000..0d0241f267ef58b24979e022b05f2a9adf768826 --- /dev/null +++ b/NeuralSeq/modules/commons/ssim.py @@ -0,0 +1,391 @@ +# ''' +# https://github.com/One-sixth/ms_ssim_pytorch/blob/master/ssim.py +# ''' +# +# import torch +# import torch.jit +# import torch.nn.functional as F +# +# +# @torch.jit.script +# def create_window(window_size: int, sigma: float, channel: int): +# ''' +# Create 1-D gauss kernel +# :param window_size: the size of gauss kernel +# :param sigma: sigma of normal distribution +# :param channel: input channel +# :return: 1D kernel +# ''' +# coords = torch.arange(window_size, dtype=torch.float) +# coords -= window_size // 2 +# +# g = torch.exp(-(coords ** 2) / (2 * sigma ** 2)) +# g /= g.sum() +# +# g = g.reshape(1, 1, 1, -1).repeat(channel, 1, 1, 1) +# return g +# +# +# @torch.jit.script +# def _gaussian_filter(x, window_1d, use_padding: bool): +# ''' +# Blur input with 1-D kernel +# :param x: batch of tensors to be blured +# :param window_1d: 1-D gauss kernel +# :param use_padding: padding image before conv +# :return: blured tensors +# ''' +# C = x.shape[1] +# padding = 0 +# if use_padding: +# window_size = window_1d.shape[3] +# padding = window_size // 2 +# out = F.conv2d(x, window_1d, stride=1, padding=(0, padding), groups=C) +# out = F.conv2d(out, window_1d.transpose(2, 3), stride=1, padding=(padding, 0), groups=C) +# return out +# +# +# @torch.jit.script +# def ssim(X, Y, window, data_range: float, use_padding: bool = False): +# ''' +# Calculate ssim index for X and Y +# :param X: images [B, C, H, N_bins] +# :param Y: images [B, C, H, N_bins] +# :param window: 1-D gauss kernel +# :param data_range: value range of input images. (usually 1.0 or 255) +# :param use_padding: padding image before conv +# :return: +# ''' +# +# K1 = 0.01 +# K2 = 0.03 +# compensation = 1.0 +# +# C1 = (K1 * data_range) ** 2 +# C2 = (K2 * data_range) ** 2 +# +# mu1 = _gaussian_filter(X, window, use_padding) +# mu2 = _gaussian_filter(Y, window, use_padding) +# sigma1_sq = _gaussian_filter(X * X, window, use_padding) +# sigma2_sq = _gaussian_filter(Y * Y, window, use_padding) +# sigma12 = _gaussian_filter(X * Y, window, use_padding) +# +# mu1_sq = mu1.pow(2) +# mu2_sq = mu2.pow(2) +# mu1_mu2 = mu1 * mu2 +# +# sigma1_sq = compensation * (sigma1_sq - mu1_sq) +# sigma2_sq = compensation * (sigma2_sq - mu2_sq) +# sigma12 = compensation * (sigma12 - mu1_mu2) +# +# cs_map = (2 * sigma12 + C2) / (sigma1_sq + sigma2_sq + C2) +# # Fixed the issue that the negative value of cs_map caused ms_ssim to output Nan. +# cs_map = cs_map.clamp_min(0.) 
+# ssim_map = ((2 * mu1_mu2 + C1) / (mu1_sq + mu2_sq + C1)) * cs_map +# +# ssim_val = ssim_map.mean(dim=(1, 2, 3)) # reduce along CHW +# cs = cs_map.mean(dim=(1, 2, 3)) +# +# return ssim_val, cs +# +# +# @torch.jit.script +# def ms_ssim(X, Y, window, data_range: float, weights, use_padding: bool = False, eps: float = 1e-8): +# ''' +# interface of ms-ssim +# :param X: a batch of images, (N,C,H,W) +# :param Y: a batch of images, (N,C,H,W) +# :param window: 1-D gauss kernel +# :param data_range: value range of input images. (usually 1.0 or 255) +# :param weights: weights for different levels +# :param use_padding: padding image before conv +# :param eps: use for avoid grad nan. +# :return: +# ''' +# levels = weights.shape[0] +# cs_vals = [] +# ssim_vals = [] +# for _ in range(levels): +# ssim_val, cs = ssim(X, Y, window=window, data_range=data_range, use_padding=use_padding) +# # Use for fix a issue. When c = a ** b and a is 0, c.backward() will cause the a.grad become inf. +# ssim_val = ssim_val.clamp_min(eps) +# cs = cs.clamp_min(eps) +# cs_vals.append(cs) +# +# ssim_vals.append(ssim_val) +# padding = (X.shape[2] % 2, X.shape[3] % 2) +# X = F.avg_pool2d(X, kernel_size=2, stride=2, padding=padding) +# Y = F.avg_pool2d(Y, kernel_size=2, stride=2, padding=padding) +# +# cs_vals = torch.stack(cs_vals, dim=0) +# ms_ssim_val = torch.prod((cs_vals[:-1] ** weights[:-1].unsqueeze(1)) * (ssim_vals[-1] ** weights[-1]), dim=0) +# return ms_ssim_val +# +# +# class SSIM(torch.jit.ScriptModule): +# __constants__ = ['data_range', 'use_padding'] +# +# def __init__(self, window_size=11, window_sigma=1.5, data_range=255., channel=3, use_padding=False): +# ''' +# :param window_size: the size of gauss kernel +# :param window_sigma: sigma of normal distribution +# :param data_range: value range of input images. (usually 1.0 or 255) +# :param channel: input channels (default: 3) +# :param use_padding: padding image before conv +# ''' +# super().__init__() +# assert window_size % 2 == 1, 'Window size must be odd.' +# window = create_window(window_size, window_sigma, channel) +# self.register_buffer('window', window) +# self.data_range = data_range +# self.use_padding = use_padding +# +# @torch.jit.script_method +# def forward(self, X, Y): +# r = ssim(X, Y, window=self.window, data_range=self.data_range, use_padding=self.use_padding) +# return r[0] +# +# +# class MS_SSIM(torch.jit.ScriptModule): +# __constants__ = ['data_range', 'use_padding', 'eps'] +# +# def __init__(self, window_size=11, window_sigma=1.5, data_range=255., channel=3, use_padding=False, weights=None, +# levels=None, eps=1e-8): +# ''' +# class for ms-ssim +# :param window_size: the size of gauss kernel +# :param window_sigma: sigma of normal distribution +# :param data_range: value range of input images. (usually 1.0 or 255) +# :param channel: input channels +# :param use_padding: padding image before conv +# :param weights: weights for different levels. (default [0.0448, 0.2856, 0.3001, 0.2363, 0.1333]) +# :param levels: number of downsampling +# :param eps: Use for fix a issue. When c = a ** b and a is 0, c.backward() will cause the a.grad become inf. +# ''' +# super().__init__() +# assert window_size % 2 == 1, 'Window size must be odd.' 
+# self.data_range = data_range +# self.use_padding = use_padding +# self.eps = eps +# +# window = create_window(window_size, window_sigma, channel) +# self.register_buffer('window', window) +# +# if weights is None: +# weights = [0.0448, 0.2856, 0.3001, 0.2363, 0.1333] +# weights = torch.tensor(weights, dtype=torch.float) +# +# if levels is not None: +# weights = weights[:levels] +# weights = weights / weights.sum() +# +# self.register_buffer('weights', weights) +# +# @torch.jit.script_method +# def forward(self, X, Y): +# return ms_ssim(X, Y, window=self.window, data_range=self.data_range, weights=self.weights, +# use_padding=self.use_padding, eps=self.eps) +# +# +# if __name__ == '__main__': +# print('Simple Test') +# im = torch.randint(0, 255, (5, 3, 256, 256), dtype=torch.float, device='cuda') +# img1 = im / 255 +# img2 = img1 * 0.5 +# +# losser = SSIM(data_range=1.).cuda() +# loss = losser(img1, img2).mean() +# +# losser2 = MS_SSIM(data_range=1.).cuda() +# loss2 = losser2(img1, img2).mean() +# +# print(loss.item()) +# print(loss2.item()) +# +# if __name__ == '__main__': +# print('Training Test') +# import cv2 +# import torch.optim +# import numpy as np +# import imageio +# import time +# +# out_test_video = False +# # 最好不要直接输出gif图,会非常大,最好先输出mkv文件后用ffmpeg转换到GIF +# video_use_gif = False +# +# im = cv2.imread('test_img1.jpg', 1) +# t_im = torch.from_numpy(im).cuda().permute(2, 0, 1).float()[None] / 255. +# +# if out_test_video: +# if video_use_gif: +# fps = 0.5 +# out_wh = (im.shape[1] // 2, im.shape[0] // 2) +# suffix = '.gif' +# else: +# fps = 5 +# out_wh = (im.shape[1], im.shape[0]) +# suffix = '.mkv' +# video_last_time = time.perf_counter() +# video = imageio.get_writer('ssim_test' + suffix, fps=fps) +# +# # 测试ssim +# print('Training SSIM') +# rand_im = torch.randint_like(t_im, 0, 255, dtype=torch.float32) / 255. +# rand_im.requires_grad = True +# optim = torch.optim.Adam([rand_im], 0.003, eps=1e-8) +# losser = SSIM(data_range=1., channel=t_im.shape[1]).cuda() +# ssim_score = 0 +# while ssim_score < 0.999: +# optim.zero_grad() +# loss = losser(rand_im, t_im) +# (-loss).sum().backward() +# ssim_score = loss.item() +# optim.step() +# r_im = np.transpose(rand_im.detach().cpu().numpy().clip(0, 1) * 255, [0, 2, 3, 1]).astype(np.uint8)[0] +# r_im = cv2.putText(r_im, 'ssim %f' % ssim_score, (10, 30), cv2.FONT_HERSHEY_PLAIN, 2, (255, 0, 0), 2) +# +# if out_test_video: +# if time.perf_counter() - video_last_time > 1. / fps: +# video_last_time = time.perf_counter() +# out_frame = cv2.cvtColor(r_im, cv2.COLOR_BGR2RGB) +# out_frame = cv2.resize(out_frame, out_wh, interpolation=cv2.INTER_AREA) +# if isinstance(out_frame, cv2.UMat): +# out_frame = out_frame.get() +# video.append_data(out_frame) +# +# cv2.imshow('ssim', r_im) +# cv2.setWindowTitle('ssim', 'ssim %f' % ssim_score) +# cv2.waitKey(1) +# +# if out_test_video: +# video.close() +# +# # 测试ms_ssim +# if out_test_video: +# if video_use_gif: +# fps = 0.5 +# out_wh = (im.shape[1] // 2, im.shape[0] // 2) +# suffix = '.gif' +# else: +# fps = 5 +# out_wh = (im.shape[1], im.shape[0]) +# suffix = '.mkv' +# video_last_time = time.perf_counter() +# video = imageio.get_writer('ms_ssim_test' + suffix, fps=fps) +# +# print('Training MS_SSIM') +# rand_im = torch.randint_like(t_im, 0, 255, dtype=torch.float32) / 255. 
+# rand_im.requires_grad = True +# optim = torch.optim.Adam([rand_im], 0.003, eps=1e-8) +# losser = MS_SSIM(data_range=1., channel=t_im.shape[1]).cuda() +# ssim_score = 0 +# while ssim_score < 0.999: +# optim.zero_grad() +# loss = losser(rand_im, t_im) +# (-loss).sum().backward() +# ssim_score = loss.item() +# optim.step() +# r_im = np.transpose(rand_im.detach().cpu().numpy().clip(0, 1) * 255, [0, 2, 3, 1]).astype(np.uint8)[0] +# r_im = cv2.putText(r_im, 'ms_ssim %f' % ssim_score, (10, 30), cv2.FONT_HERSHEY_PLAIN, 2, (255, 0, 0), 2) +# +# if out_test_video: +# if time.perf_counter() - video_last_time > 1. / fps: +# video_last_time = time.perf_counter() +# out_frame = cv2.cvtColor(r_im, cv2.COLOR_BGR2RGB) +# out_frame = cv2.resize(out_frame, out_wh, interpolation=cv2.INTER_AREA) +# if isinstance(out_frame, cv2.UMat): +# out_frame = out_frame.get() +# video.append_data(out_frame) +# +# cv2.imshow('ms_ssim', r_im) +# cv2.setWindowTitle('ms_ssim', 'ms_ssim %f' % ssim_score) +# cv2.waitKey(1) +# +# if out_test_video: +# video.close() + +""" +Adapted from https://github.com/Po-Hsun-Su/pytorch-ssim +""" + +import torch +import torch.nn.functional as F +from torch.autograd import Variable +import numpy as np +from math import exp + + +def gaussian(window_size, sigma): + gauss = torch.Tensor([exp(-(x - window_size // 2) ** 2 / float(2 * sigma ** 2)) for x in range(window_size)]) + return gauss / gauss.sum() + + +def create_window(window_size, channel): + _1D_window = gaussian(window_size, 1.5).unsqueeze(1) + _2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0) + window = Variable(_2D_window.expand(channel, 1, window_size, window_size).contiguous()) + return window + + +def _ssim(img1, img2, window, window_size, channel, size_average=True): + mu1 = F.conv2d(img1, window, padding=window_size // 2, groups=channel) + mu2 = F.conv2d(img2, window, padding=window_size // 2, groups=channel) + + mu1_sq = mu1.pow(2) + mu2_sq = mu2.pow(2) + mu1_mu2 = mu1 * mu2 + + sigma1_sq = F.conv2d(img1 * img1, window, padding=window_size // 2, groups=channel) - mu1_sq + sigma2_sq = F.conv2d(img2 * img2, window, padding=window_size // 2, groups=channel) - mu2_sq + sigma12 = F.conv2d(img1 * img2, window, padding=window_size // 2, groups=channel) - mu1_mu2 + + C1 = 0.01 ** 2 + C2 = 0.03 ** 2 + + ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2)) + + if size_average: + return ssim_map.mean() + else: + return ssim_map.mean(1) + + +class SSIM(torch.nn.Module): + def __init__(self, window_size=11, size_average=True): + super(SSIM, self).__init__() + self.window_size = window_size + self.size_average = size_average + self.channel = 1 + self.window = create_window(window_size, self.channel) + + def forward(self, img1, img2): + (_, channel, _, _) = img1.size() + + if channel == self.channel and self.window.data.type() == img1.data.type(): + window = self.window + else: + window = create_window(self.window_size, channel) + + if img1.is_cuda: + window = window.cuda(img1.get_device()) + window = window.type_as(img1) + + self.window = window + self.channel = channel + + return _ssim(img1, img2, window, self.window_size, channel, self.size_average) + + +window = None + + +def ssim(img1, img2, window_size=11, size_average=True): + (_, channel, _, _) = img1.size() + global window + if window is None: + window = create_window(window_size, channel) + if img1.is_cuda: + window = window.cuda(img1.get_device()) + window = window.type_as(img1) + return 
_ssim(img1, img2, window, window_size, channel, size_average) diff --git a/NeuralSeq/modules/commons/transformer.py b/NeuralSeq/modules/commons/transformer.py new file mode 100644 index 0000000000000000000000000000000000000000..9234b7928450974ae39843aa6c870400a5d31b1c --- /dev/null +++ b/NeuralSeq/modules/commons/transformer.py @@ -0,0 +1,747 @@ +import math +import torch +from torch import nn +from torch.nn import Parameter, Linear +from modules.commons.common_layers import LayerNorm, Embedding +from utils.tts_utils import get_incremental_state, set_incremental_state, softmax, make_positions +import torch.nn.functional as F + +DEFAULT_MAX_SOURCE_POSITIONS = 2000 +DEFAULT_MAX_TARGET_POSITIONS = 2000 + + +class SinusoidalPositionalEmbedding(nn.Module): + """This module produces sinusoidal positional embeddings of any length. + + Padding symbols are ignored. + """ + + def __init__(self, embedding_dim, padding_idx, init_size=1024): + super().__init__() + self.embedding_dim = embedding_dim + self.padding_idx = padding_idx + self.weights = SinusoidalPositionalEmbedding.get_embedding( + init_size, + embedding_dim, + padding_idx, + ) + self.register_buffer('_float_tensor', torch.FloatTensor(1)) + + @staticmethod + def get_embedding(num_embeddings, embedding_dim, padding_idx=None): + """Build sinusoidal embeddings. + + This matches the implementation in tensor2tensor, but differs slightly + from the description in Section 3.5 of "Attention Is All You Need". + """ + half_dim = embedding_dim // 2 + emb = math.log(10000) / (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb) + emb = torch.arange(num_embeddings, dtype=torch.float).unsqueeze(1) * emb.unsqueeze(0) + emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1) + if embedding_dim % 2 == 1: + # zero pad + emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1) + if padding_idx is not None: + emb[padding_idx, :] = 0 + return emb + + def forward(self, input, incremental_state=None, timestep=None, positions=None, **kwargs): + """Input is expected to be of size [bsz x seqlen].""" + bsz, seq_len = input.shape[:2] + max_pos = self.padding_idx + 1 + seq_len + if self.weights is None or max_pos > self.weights.size(0): + # recompute/expand embeddings if needed + self.weights = SinusoidalPositionalEmbedding.get_embedding( + max_pos, + self.embedding_dim, + self.padding_idx, + ) + self.weights = self.weights.to(self._float_tensor) + + if incremental_state is not None: + # positions is the same for every token when decoding a single step + pos = timestep.view(-1)[0] + 1 if timestep is not None else seq_len + return self.weights[self.padding_idx + pos, :].expand(bsz, 1, -1) + + positions = make_positions(input, self.padding_idx) if positions is None else positions + return self.weights.index_select(0, positions.view(-1)).view(bsz, seq_len, -1).detach() + + def max_positions(self): + """Maximum number of supported positions.""" + return int(1e5) # an arbitrary large number + + +class TransformerFFNLayer(nn.Module): + def __init__(self, hidden_size, filter_size, padding="SAME", kernel_size=1, dropout=0., act='gelu'): + super().__init__() + self.kernel_size = kernel_size + self.dropout = dropout + self.act = act + if padding == 'SAME': + self.ffn_1 = nn.Conv1d(hidden_size, filter_size, kernel_size, padding=kernel_size // 2) + elif padding == 'LEFT': + self.ffn_1 = nn.Sequential( + nn.ConstantPad1d((kernel_size - 1, 0), 0.0), + nn.Conv1d(hidden_size, filter_size, kernel_size) + ) + self.ffn_2 = 
Linear(filter_size, hidden_size) + + def forward(self, x, incremental_state=None): + # x: T x B x C + if incremental_state is not None: + saved_state = self._get_input_buffer(incremental_state) + if 'prev_input' in saved_state: + prev_input = saved_state['prev_input'] + x = torch.cat((prev_input, x), dim=0) + x = x[-self.kernel_size:] + saved_state['prev_input'] = x + self._set_input_buffer(incremental_state, saved_state) + + x = self.ffn_1(x.permute(1, 2, 0)).permute(2, 0, 1) + x = x * self.kernel_size ** -0.5 + + if incremental_state is not None: + x = x[-1:] + if self.act == 'gelu': + x = F.gelu(x) + if self.act == 'relu': + x = F.relu(x) + x = F.dropout(x, self.dropout, training=self.training) + x = self.ffn_2(x) + return x + + def _get_input_buffer(self, incremental_state): + return get_incremental_state( + self, + incremental_state, + 'f', + ) or {} + + def _set_input_buffer(self, incremental_state, buffer): + set_incremental_state( + self, + incremental_state, + 'f', + buffer, + ) + + def clear_buffer(self, incremental_state): + if incremental_state is not None: + saved_state = self._get_input_buffer(incremental_state) + if 'prev_input' in saved_state: + del saved_state['prev_input'] + self._set_input_buffer(incremental_state, saved_state) + + +class MultiheadAttention(nn.Module): + def __init__(self, embed_dim, num_heads, kdim=None, vdim=None, dropout=0., bias=True, + add_bias_kv=False, add_zero_attn=False, self_attention=False, + encoder_decoder_attention=False): + super().__init__() + self.embed_dim = embed_dim + self.kdim = kdim if kdim is not None else embed_dim + self.vdim = vdim if vdim is not None else embed_dim + self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim + + self.num_heads = num_heads + self.dropout = dropout + self.head_dim = embed_dim // num_heads + assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads" + self.scaling = self.head_dim ** -0.5 + + self.self_attention = self_attention + self.encoder_decoder_attention = encoder_decoder_attention + + assert not self.self_attention or self.qkv_same_dim, 'Self-attention requires query, key and ' \ + 'value to be of the same size' + + if self.qkv_same_dim: + self.in_proj_weight = Parameter(torch.Tensor(3 * embed_dim, embed_dim)) + else: + self.k_proj_weight = Parameter(torch.Tensor(embed_dim, self.kdim)) + self.v_proj_weight = Parameter(torch.Tensor(embed_dim, self.vdim)) + self.q_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim)) + + if bias: + self.in_proj_bias = Parameter(torch.Tensor(3 * embed_dim)) + else: + self.register_parameter('in_proj_bias', None) + + self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + + if add_bias_kv: + self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim)) + self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim)) + else: + self.bias_k = self.bias_v = None + + self.add_zero_attn = add_zero_attn + + self.reset_parameters() + + self.enable_torch_version = False + if hasattr(F, "multi_head_attention_forward"): + self.enable_torch_version = True + else: + self.enable_torch_version = False + self.last_attn_probs = None + + def reset_parameters(self): + if self.qkv_same_dim: + nn.init.xavier_uniform_(self.in_proj_weight) + else: + nn.init.xavier_uniform_(self.k_proj_weight) + nn.init.xavier_uniform_(self.v_proj_weight) + nn.init.xavier_uniform_(self.q_proj_weight) + + nn.init.xavier_uniform_(self.out_proj.weight) + if self.in_proj_bias is not None: + nn.init.constant_(self.in_proj_bias, 0.) 
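+            # also zero-initialise the output-projection bias (the in-projection bias above is the fused Q/K/V bias)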
+ nn.init.constant_(self.out_proj.bias, 0.) + if self.bias_k is not None: + nn.init.xavier_normal_(self.bias_k) + if self.bias_v is not None: + nn.init.xavier_normal_(self.bias_v) + + def forward( + self, + query, key, value, + key_padding_mask=None, + incremental_state=None, + need_weights=True, + static_kv=False, + attn_mask=None, + before_softmax=False, + need_head_weights=False, + enc_dec_attn_constraint_mask=None, + reset_attn_weight=None + ): + """Input shape: Time x Batch x Channel + + Args: + key_padding_mask (ByteTensor, optional): mask to exclude + keys that are pads, of shape `(batch, src_len)`, where + padding elements are indicated by 1s. + need_weights (bool, optional): return the attention weights, + averaged over heads (default: False). + attn_mask (ByteTensor, optional): typically used to + implement causal attention, where the mask prevents the + attention from looking forward in time (default: None). + before_softmax (bool, optional): return the raw attention + weights and values before the attention softmax. + need_head_weights (bool, optional): return the attention + weights for each head. Implies *need_weights*. Default: + return the average attention weights over all heads. + """ + if need_head_weights: + need_weights = True + + tgt_len, bsz, embed_dim = query.size() + assert embed_dim == self.embed_dim + assert list(query.size()) == [tgt_len, bsz, embed_dim] + if self.enable_torch_version and incremental_state is None and not static_kv and reset_attn_weight is None: + if self.qkv_same_dim: + return F.multi_head_attention_forward(query, key, value, + self.embed_dim, self.num_heads, + self.in_proj_weight, + self.in_proj_bias, self.bias_k, self.bias_v, + self.add_zero_attn, self.dropout, + self.out_proj.weight, self.out_proj.bias, + self.training, key_padding_mask, need_weights, + attn_mask) + else: + return F.multi_head_attention_forward(query, key, value, + self.embed_dim, self.num_heads, + torch.empty([0]), + self.in_proj_bias, self.bias_k, self.bias_v, + self.add_zero_attn, self.dropout, + self.out_proj.weight, self.out_proj.bias, + self.training, key_padding_mask, need_weights, + attn_mask, use_separate_proj_weight=True, + q_proj_weight=self.q_proj_weight, + k_proj_weight=self.k_proj_weight, + v_proj_weight=self.v_proj_weight) + + if incremental_state is not None: + saved_state = self._get_input_buffer(incremental_state) + if 'prev_key' in saved_state: + # previous time steps are cached - no need to recompute + # key and value if they are static + if static_kv: + assert self.encoder_decoder_attention and not self.self_attention + key = value = None + else: + saved_state = None + + if self.self_attention: + # self-attention + q, k, v = self.in_proj_qkv(query) + elif self.encoder_decoder_attention: + # encoder-decoder attention + q = self.in_proj_q(query) + if key is None: + assert value is None + k = v = None + else: + k = self.in_proj_k(key) + v = self.in_proj_v(key) + + else: + q = self.in_proj_q(query) + k = self.in_proj_k(key) + v = self.in_proj_v(value) + q *= self.scaling + + if self.bias_k is not None: + assert self.bias_v is not None + k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)]) + v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)]) + if attn_mask is not None: + attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1) + if key_padding_mask is not None: + key_padding_mask = torch.cat( + [key_padding_mask, key_padding_mask.new_zeros(key_padding_mask.size(0), 1)], dim=1) + + q = q.contiguous().view(tgt_len, bsz * self.num_heads, 
self.head_dim).transpose(0, 1) + if k is not None: + k = k.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1) + if v is not None: + v = v.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1) + + if saved_state is not None: + # saved states are stored with shape (bsz, num_heads, seq_len, head_dim) + if 'prev_key' in saved_state: + prev_key = saved_state['prev_key'].view(bsz * self.num_heads, -1, self.head_dim) + if static_kv: + k = prev_key + else: + k = torch.cat((prev_key, k), dim=1) + if 'prev_value' in saved_state: + prev_value = saved_state['prev_value'].view(bsz * self.num_heads, -1, self.head_dim) + if static_kv: + v = prev_value + else: + v = torch.cat((prev_value, v), dim=1) + if 'prev_key_padding_mask' in saved_state and saved_state['prev_key_padding_mask'] is not None: + prev_key_padding_mask = saved_state['prev_key_padding_mask'] + if static_kv: + key_padding_mask = prev_key_padding_mask + else: + key_padding_mask = torch.cat((prev_key_padding_mask, key_padding_mask), dim=1) + + saved_state['prev_key'] = k.view(bsz, self.num_heads, -1, self.head_dim) + saved_state['prev_value'] = v.view(bsz, self.num_heads, -1, self.head_dim) + saved_state['prev_key_padding_mask'] = key_padding_mask + + self._set_input_buffer(incremental_state, saved_state) + + src_len = k.size(1) + + # This is part of a workaround to get around fork/join parallelism + # not supporting Optional types. + if key_padding_mask is not None and key_padding_mask.shape == torch.Size([]): + key_padding_mask = None + + if key_padding_mask is not None: + assert key_padding_mask.size(0) == bsz + assert key_padding_mask.size(1) == src_len + + if self.add_zero_attn: + src_len += 1 + k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1) + v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1) + if attn_mask is not None: + attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1) + if key_padding_mask is not None: + key_padding_mask = torch.cat( + [key_padding_mask, torch.zeros(key_padding_mask.size(0), 1).type_as(key_padding_mask)], dim=1) + + attn_weights = torch.bmm(q, k.transpose(1, 2)) + attn_weights = self.apply_sparse_mask(attn_weights, tgt_len, src_len, bsz) + + assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len] + + if attn_mask is not None: + if len(attn_mask.shape) == 2: + attn_mask = attn_mask.unsqueeze(0) + elif len(attn_mask.shape) == 3: + attn_mask = attn_mask[:, None].repeat([1, self.num_heads, 1, 1]).reshape( + bsz * self.num_heads, tgt_len, src_len) + attn_weights = attn_weights + attn_mask + + if enc_dec_attn_constraint_mask is not None: # bs x head x L_kv + attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attn_weights = attn_weights.masked_fill( + enc_dec_attn_constraint_mask.unsqueeze(2).bool(), + -1e8, + ) + attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) + + if key_padding_mask is not None: + # don't attend to padding symbols + attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attn_weights = attn_weights.masked_fill( + key_padding_mask.unsqueeze(1).unsqueeze(2), + -1e8, + ) + attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) + + attn_logits = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + + if before_softmax: + return attn_weights, v + + attn_weights_float = softmax(attn_weights, dim=-1) + attn_weights = attn_weights_float.type_as(attn_weights) + attn_probs = 
F.dropout(attn_weights_float.type_as(attn_weights), p=self.dropout, training=self.training) + + if reset_attn_weight is not None: + if reset_attn_weight: + self.last_attn_probs = attn_probs.detach() + else: + assert self.last_attn_probs is not None + attn_probs = self.last_attn_probs + attn = torch.bmm(attn_probs, v) + assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim] + attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim) + attn = self.out_proj(attn) + + if need_weights: + attn_weights = attn_weights_float.view(bsz, self.num_heads, tgt_len, src_len).transpose(1, 0) + if not need_head_weights: + # average attention weights over heads + attn_weights = attn_weights.mean(dim=0) + else: + attn_weights = None + + return attn, (attn_weights, attn_logits) + + def in_proj_qkv(self, query): + return self._in_proj(query).chunk(3, dim=-1) + + def in_proj_q(self, query): + if self.qkv_same_dim: + return self._in_proj(query, end=self.embed_dim) + else: + bias = self.in_proj_bias + if bias is not None: + bias = bias[:self.embed_dim] + return F.linear(query, self.q_proj_weight, bias) + + def in_proj_k(self, key): + if self.qkv_same_dim: + return self._in_proj(key, start=self.embed_dim, end=2 * self.embed_dim) + else: + weight = self.k_proj_weight + bias = self.in_proj_bias + if bias is not None: + bias = bias[self.embed_dim:2 * self.embed_dim] + return F.linear(key, weight, bias) + + def in_proj_v(self, value): + if self.qkv_same_dim: + return self._in_proj(value, start=2 * self.embed_dim) + else: + weight = self.v_proj_weight + bias = self.in_proj_bias + if bias is not None: + bias = bias[2 * self.embed_dim:] + return F.linear(value, weight, bias) + + def _in_proj(self, input, start=0, end=None): + weight = self.in_proj_weight + bias = self.in_proj_bias + weight = weight[start:end, :] + if bias is not None: + bias = bias[start:end] + return F.linear(input, weight, bias) + + def _get_input_buffer(self, incremental_state): + return get_incremental_state( + self, + incremental_state, + 'attn_state', + ) or {} + + def _set_input_buffer(self, incremental_state, buffer): + set_incremental_state( + self, + incremental_state, + 'attn_state', + buffer, + ) + + def apply_sparse_mask(self, attn_weights, tgt_len, src_len, bsz): + return attn_weights + + def clear_buffer(self, incremental_state=None): + if incremental_state is not None: + saved_state = self._get_input_buffer(incremental_state) + if 'prev_key' in saved_state: + del saved_state['prev_key'] + if 'prev_value' in saved_state: + del saved_state['prev_value'] + self._set_input_buffer(incremental_state, saved_state) + + +class EncSALayer(nn.Module): + def __init__(self, c, num_heads, dropout, attention_dropout=0.1, + relu_dropout=0.1, kernel_size=9, padding='SAME', act='gelu'): + super().__init__() + self.c = c + self.dropout = dropout + self.num_heads = num_heads + if num_heads > 0: + self.layer_norm1 = LayerNorm(c) + self.self_attn = MultiheadAttention( + self.c, num_heads, self_attention=True, dropout=attention_dropout, bias=False) + self.layer_norm2 = LayerNorm(c) + self.ffn = TransformerFFNLayer( + c, 4 * c, kernel_size=kernel_size, dropout=relu_dropout, padding=padding, act=act) + + def forward(self, x, encoder_padding_mask=None, **kwargs): + layer_norm_training = kwargs.get('layer_norm_training', None) + if layer_norm_training is not None: + self.layer_norm1.training = layer_norm_training + self.layer_norm2.training = layer_norm_training + if self.num_heads > 0: + residual = x + x = self.layer_norm1(x) + x, 
_, = self.self_attn( + query=x, + key=x, + value=x, + key_padding_mask=encoder_padding_mask + ) + x = F.dropout(x, self.dropout, training=self.training) + x = residual + x + x = x * (1 - encoder_padding_mask.float()).transpose(0, 1)[..., None] + + residual = x + x = self.layer_norm2(x) + x = self.ffn(x) + x = F.dropout(x, self.dropout, training=self.training) + x = residual + x + x = x * (1 - encoder_padding_mask.float()).transpose(0, 1)[..., None] + return x + + +class DecSALayer(nn.Module): + def __init__(self, c, num_heads, dropout, attention_dropout=0.1, relu_dropout=0.1, + kernel_size=9, act='gelu'): + super().__init__() + self.c = c + self.dropout = dropout + self.layer_norm1 = LayerNorm(c) + self.self_attn = MultiheadAttention( + c, num_heads, self_attention=True, dropout=attention_dropout, bias=False + ) + self.layer_norm2 = LayerNorm(c) + self.encoder_attn = MultiheadAttention( + c, num_heads, encoder_decoder_attention=True, dropout=attention_dropout, bias=False, + ) + self.layer_norm3 = LayerNorm(c) + self.ffn = TransformerFFNLayer( + c, 4 * c, padding='LEFT', kernel_size=kernel_size, dropout=relu_dropout, act=act) + + def forward( + self, + x, + encoder_out=None, + encoder_padding_mask=None, + incremental_state=None, + self_attn_mask=None, + self_attn_padding_mask=None, + attn_out=None, + reset_attn_weight=None, + **kwargs, + ): + layer_norm_training = kwargs.get('layer_norm_training', None) + if layer_norm_training is not None: + self.layer_norm1.training = layer_norm_training + self.layer_norm2.training = layer_norm_training + self.layer_norm3.training = layer_norm_training + residual = x + x = self.layer_norm1(x) + x, _ = self.self_attn( + query=x, + key=x, + value=x, + key_padding_mask=self_attn_padding_mask, + incremental_state=incremental_state, + attn_mask=self_attn_mask + ) + x = F.dropout(x, self.dropout, training=self.training) + x = residual + x + + attn_logits = None + if encoder_out is not None or attn_out is not None: + residual = x + x = self.layer_norm2(x) + if encoder_out is not None: + x, attn = self.encoder_attn( + query=x, + key=encoder_out, + value=encoder_out, + key_padding_mask=encoder_padding_mask, + incremental_state=incremental_state, + static_kv=True, + enc_dec_attn_constraint_mask=get_incremental_state(self, incremental_state, + 'enc_dec_attn_constraint_mask'), + reset_attn_weight=reset_attn_weight + ) + attn_logits = attn[1] + elif attn_out is not None: + x = self.encoder_attn.in_proj_v(attn_out) + if encoder_out is not None or attn_out is not None: + x = F.dropout(x, self.dropout, training=self.training) + x = residual + x + + residual = x + x = self.layer_norm3(x) + x = self.ffn(x, incremental_state=incremental_state) + x = F.dropout(x, self.dropout, training=self.training) + x = residual + x + return x, attn_logits + + def clear_buffer(self, input, encoder_out=None, encoder_padding_mask=None, incremental_state=None): + self.encoder_attn.clear_buffer(incremental_state) + self.ffn.clear_buffer(incremental_state) + + def set_buffer(self, name, tensor, incremental_state): + return set_incremental_state(self, incremental_state, name, tensor) + + +class TransformerEncoderLayer(nn.Module): + def __init__(self, hidden_size, dropout, kernel_size=9, num_heads=2): + super().__init__() + self.hidden_size = hidden_size + self.dropout = dropout + self.num_heads = num_heads + self.op = EncSALayer( + hidden_size, num_heads, dropout=dropout, + attention_dropout=0.0, relu_dropout=dropout, + kernel_size=kernel_size) + + def forward(self, x, **kwargs): + return 
self.op(x, **kwargs) + + +class TransformerDecoderLayer(nn.Module): + def __init__(self, hidden_size, dropout, kernel_size=9, num_heads=2): + super().__init__() + self.hidden_size = hidden_size + self.dropout = dropout + self.num_heads = num_heads + self.op = DecSALayer( + hidden_size, num_heads, dropout=dropout, + attention_dropout=0.0, relu_dropout=dropout, + kernel_size=kernel_size) + + def forward(self, x, **kwargs): + return self.op(x, **kwargs) + + def clear_buffer(self, *args): + return self.op.clear_buffer(*args) + + def set_buffer(self, *args): + return self.op.set_buffer(*args) + + +class FFTBlocks(nn.Module): + def __init__(self, hidden_size, num_layers, ffn_kernel_size=9, dropout=0.0, + num_heads=2, use_pos_embed=True, use_last_norm=True, + use_pos_embed_alpha=True): + super().__init__() + self.num_layers = num_layers + embed_dim = self.hidden_size = hidden_size + self.dropout = dropout + self.use_pos_embed = use_pos_embed + self.use_last_norm = use_last_norm + if use_pos_embed: + self.max_source_positions = DEFAULT_MAX_TARGET_POSITIONS + self.padding_idx = 0 + self.pos_embed_alpha = nn.Parameter(torch.Tensor([1])) if use_pos_embed_alpha else 1 + self.embed_positions = SinusoidalPositionalEmbedding( + embed_dim, self.padding_idx, init_size=DEFAULT_MAX_TARGET_POSITIONS, + ) + + self.layers = nn.ModuleList([]) + self.layers.extend([ + TransformerEncoderLayer(self.hidden_size, self.dropout, + kernel_size=ffn_kernel_size, num_heads=num_heads) + for _ in range(self.num_layers) + ]) + if self.use_last_norm: + self.layer_norm = nn.LayerNorm(embed_dim) + else: + self.layer_norm = None + + def forward(self, x, padding_mask=None, attn_mask=None, return_hiddens=False): + """ + :param x: [B, T, C] + :param padding_mask: [B, T] + :return: [B, T, C] or [L, B, T, C] + """ + padding_mask = x.abs().sum(-1).eq(0).data if padding_mask is None else padding_mask + nonpadding_mask_TB = 1 - padding_mask.transpose(0, 1).float()[:, :, None] # [T, B, 1] + if self.use_pos_embed: + positions = self.pos_embed_alpha * self.embed_positions(x[..., 0]) + x = x + positions + x = F.dropout(x, p=self.dropout, training=self.training) + # B x T x C -> T x B x C + x = x.transpose(0, 1) * nonpadding_mask_TB + hiddens = [] + for layer in self.layers: + x = layer(x, encoder_padding_mask=padding_mask, attn_mask=attn_mask) * nonpadding_mask_TB + hiddens.append(x) + if self.use_last_norm: + x = self.layer_norm(x) * nonpadding_mask_TB + if return_hiddens: + x = torch.stack(hiddens, 0) # [L, T, B, C] + x = x.transpose(1, 2) # [L, B, T, C] + else: + x = x.transpose(0, 1) # [B, T, C] + return x + + +class FastSpeechEncoder(FFTBlocks): + def __init__(self, dict_size, hidden_size=256, num_layers=4, kernel_size=9, num_heads=2, + dropout=0.0): + super().__init__(hidden_size, num_layers, kernel_size, num_heads=num_heads, + use_pos_embed=False, dropout=dropout) # use_pos_embed_alpha for compatibility + self.embed_tokens = Embedding(dict_size, hidden_size, 0) + self.embed_scale = math.sqrt(hidden_size) + self.padding_idx = 0 + self.embed_positions = SinusoidalPositionalEmbedding( + hidden_size, self.padding_idx, init_size=DEFAULT_MAX_TARGET_POSITIONS, + ) + + def forward(self, txt_tokens, attn_mask=None): + """ + + :param txt_tokens: [B, T] + :return: { + 'encoder_out': [B x T x C] + } + """ + encoder_padding_mask = txt_tokens.eq(self.padding_idx).data + x = self.forward_embedding(txt_tokens) # [B, T, H] + if self.num_layers > 0: + x = super(FastSpeechEncoder, self).forward(x, encoder_padding_mask, attn_mask=attn_mask) + return x + 
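+    # Illustrative usage only -- a minimal sketch; dict_size, shapes and token values below are
+    # assumptions chosen for the example, not values taken from this repository:
+    #
+    #     encoder = FastSpeechEncoder(dict_size=100, hidden_size=256, num_layers=4)
+    #     txt_tokens = torch.randint(1, 100, (2, 50))   # [B, T]; index 0 is reserved for padding
+    #     out = encoder(txt_tokens)                      # [B, T, hidden_size]
+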
+ def forward_embedding(self, txt_tokens): + # embed tokens and positions + x = self.embed_scale * self.embed_tokens(txt_tokens) + if self.use_pos_embed: + positions = self.embed_positions(txt_tokens) + x = x + positions + x = F.dropout(x, p=self.dropout, training=self.training) + return x + + +class FastSpeechDecoder(FFTBlocks): + def __init__(self, hidden_size=256, num_layers=4, kernel_size=9, num_heads=2): + super().__init__(hidden_size, num_layers, kernel_size, num_heads=num_heads) diff --git a/NeuralSeq/modules/commons/wavenet.py b/NeuralSeq/modules/commons/wavenet.py new file mode 100644 index 0000000000000000000000000000000000000000..7809c9b9d3331ba4fd2ffd4caae14e721e4b0732 --- /dev/null +++ b/NeuralSeq/modules/commons/wavenet.py @@ -0,0 +1,97 @@ +import torch +from torch import nn + + +def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): + n_channels_int = n_channels[0] + in_act = input_a + input_b + t_act = torch.tanh(in_act[:, :n_channels_int, :]) + s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) + acts = t_act * s_act + return acts + + +class WN(torch.nn.Module): + def __init__(self, hidden_size, kernel_size, dilation_rate, n_layers, c_cond=0, + p_dropout=0, share_cond_layers=False, is_BTC=False): + super(WN, self).__init__() + assert (kernel_size % 2 == 1) + assert (hidden_size % 2 == 0) + self.is_BTC = is_BTC + self.hidden_size = hidden_size + self.kernel_size = kernel_size + self.dilation_rate = dilation_rate + self.n_layers = n_layers + self.gin_channels = c_cond + self.p_dropout = p_dropout + self.share_cond_layers = share_cond_layers + + self.in_layers = torch.nn.ModuleList() + self.res_skip_layers = torch.nn.ModuleList() + self.drop = nn.Dropout(p_dropout) + + if c_cond != 0 and not share_cond_layers: + cond_layer = torch.nn.Conv1d(c_cond, 2 * hidden_size * n_layers, 1) + self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight') + + for i in range(n_layers): + dilation = dilation_rate ** i + padding = int((kernel_size * dilation - dilation) / 2) + in_layer = torch.nn.Conv1d(hidden_size, 2 * hidden_size, kernel_size, + dilation=dilation, padding=padding) + in_layer = torch.nn.utils.weight_norm(in_layer, name='weight') + self.in_layers.append(in_layer) + + # last one is not necessary + if i < n_layers - 1: + res_skip_channels = 2 * hidden_size + else: + res_skip_channels = hidden_size + + res_skip_layer = torch.nn.Conv1d(hidden_size, res_skip_channels, 1) + res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight') + self.res_skip_layers.append(res_skip_layer) + + def forward(self, x, nonpadding=None, cond=None): + if self.is_BTC: + x = x.transpose(1, 2) + cond = cond.transpose(1, 2) if cond is not None else None + nonpadding = nonpadding.transpose(1, 2) if nonpadding is not None else None + if nonpadding is None: + nonpadding = 1 + output = torch.zeros_like(x) + n_channels_tensor = torch.IntTensor([self.hidden_size]) + + if cond is not None and not self.share_cond_layers: + cond = self.cond_layer(cond) + + for i in range(self.n_layers): + x_in = self.in_layers[i](x) + x_in = self.drop(x_in) + if cond is not None: + cond_offset = i * 2 * self.hidden_size + cond_l = cond[:, cond_offset:cond_offset + 2 * self.hidden_size, :] + else: + cond_l = torch.zeros_like(x_in) + + acts = fused_add_tanh_sigmoid_multiply(x_in, cond_l, n_channels_tensor) + + res_skip_acts = self.res_skip_layers[i](acts) + if i < self.n_layers - 1: + x = (x + res_skip_acts[:, :self.hidden_size, :]) * nonpadding + output = output + res_skip_acts[:, 
self.hidden_size:, :] + else: + output = output + res_skip_acts + output = output * nonpadding + if self.is_BTC: + output = output.transpose(1, 2) + return output + + def remove_weight_norm(self): + def remove_weight_norm(m): + try: + nn.utils.remove_weight_norm(m) + except ValueError: # this module didn't have weight norm + return + + self.apply(remove_weight_norm) diff --git a/NeuralSeq/modules/diff/__pycache__/candidate_decoder.cpython-37.pyc b/NeuralSeq/modules/diff/__pycache__/candidate_decoder.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b068efacde23774472f944f9a353151c0ef94a4d Binary files /dev/null and b/NeuralSeq/modules/diff/__pycache__/candidate_decoder.cpython-37.pyc differ diff --git a/NeuralSeq/modules/diff/__pycache__/candidate_decoder.cpython-38.pyc b/NeuralSeq/modules/diff/__pycache__/candidate_decoder.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d0337a896f7d3094ca70eaae0741b5964e507082 Binary files /dev/null and b/NeuralSeq/modules/diff/__pycache__/candidate_decoder.cpython-38.pyc differ diff --git a/NeuralSeq/modules/diff/__pycache__/diffusion.cpython-37.pyc b/NeuralSeq/modules/diff/__pycache__/diffusion.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9b3c8faee455be8da8ee96a8ff119394c1d6617b Binary files /dev/null and b/NeuralSeq/modules/diff/__pycache__/diffusion.cpython-37.pyc differ diff --git a/NeuralSeq/modules/diff/__pycache__/diffusion.cpython-38.pyc b/NeuralSeq/modules/diff/__pycache__/diffusion.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7f5de19e01cc7cada26729e51abbf8ceb0895630 Binary files /dev/null and b/NeuralSeq/modules/diff/__pycache__/diffusion.cpython-38.pyc differ diff --git a/NeuralSeq/modules/diff/__pycache__/net.cpython-37.pyc b/NeuralSeq/modules/diff/__pycache__/net.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c47607be87416fb1469d34df925e824fab8182e4 Binary files /dev/null and b/NeuralSeq/modules/diff/__pycache__/net.cpython-37.pyc differ diff --git a/NeuralSeq/modules/diff/__pycache__/net.cpython-38.pyc b/NeuralSeq/modules/diff/__pycache__/net.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5d8f93808912f7e8beb6706f1e89ef58f3f99a1f Binary files /dev/null and b/NeuralSeq/modules/diff/__pycache__/net.cpython-38.pyc differ diff --git a/NeuralSeq/modules/diff/__pycache__/shallow_diffusion_tts.cpython-37.pyc b/NeuralSeq/modules/diff/__pycache__/shallow_diffusion_tts.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..96567f0571f1726b27252b8e8b2d37f2764d3eb6 Binary files /dev/null and b/NeuralSeq/modules/diff/__pycache__/shallow_diffusion_tts.cpython-37.pyc differ diff --git a/NeuralSeq/modules/diff/__pycache__/shallow_diffusion_tts.cpython-38.pyc b/NeuralSeq/modules/diff/__pycache__/shallow_diffusion_tts.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..67cb3c5d48cd15edc128cc7098dd6f119d0c4237 Binary files /dev/null and b/NeuralSeq/modules/diff/__pycache__/shallow_diffusion_tts.cpython-38.pyc differ diff --git a/NeuralSeq/modules/diff/candidate_decoder.py b/NeuralSeq/modules/diff/candidate_decoder.py new file mode 100644 index 0000000000000000000000000000000000000000..133a51a61942027c255841e2638e296238c07a30 --- /dev/null +++ b/NeuralSeq/modules/diff/candidate_decoder.py @@ -0,0 +1,96 @@ +from modules.fastspeech.tts_modules import FastspeechDecoder +# from 
modules.fastspeech.fast_tacotron import DecoderRNN +# from modules.fastspeech.speedy_speech.speedy_speech import ConvBlocks +# from modules.fastspeech.conformer.conformer import ConformerDecoder +import torch +from torch.nn import functional as F +import torch.nn as nn +import math +from utils.hparams import hparams +from .diffusion import Mish +Linear = nn.Linear + + +class SinusoidalPosEmb(nn.Module): + def __init__(self, dim): + super().__init__() + self.dim = dim + + def forward(self, x): + device = x.device + half_dim = self.dim // 2 + emb = math.log(10000) / (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, device=device) * -emb) + emb = x[:, None] * emb[None, :] + emb = torch.cat((emb.sin(), emb.cos()), dim=-1) + return emb + + +def Conv1d(*args, **kwargs): + layer = nn.Conv1d(*args, **kwargs) + nn.init.kaiming_normal_(layer.weight) + return layer + + +class FFT(FastspeechDecoder): + def __init__(self, hidden_size=None, num_layers=None, kernel_size=None, num_heads=None): + super().__init__(hidden_size, num_layers, kernel_size, num_heads=num_heads) + dim = hparams['residual_channels'] + self.input_projection = Conv1d(hparams['audio_num_mel_bins'], dim, 1) + self.diffusion_embedding = SinusoidalPosEmb(dim) + self.mlp = nn.Sequential( + nn.Linear(dim, dim * 4), + Mish(), + nn.Linear(dim * 4, dim) + ) + self.get_mel_out = Linear(hparams['hidden_size'], 80, bias=True) + self.get_decode_inp = Linear(hparams['hidden_size'] + dim + dim, + hparams['hidden_size']) # hs + dim + 80 -> hs + + def forward(self, spec, diffusion_step, cond, padding_mask=None, attn_mask=None, return_hiddens=False): + """ + :param spec: [B, 1, 80, T] + :param diffusion_step: [B, 1] + :param cond: [B, M, T] + :return: + """ + x = spec[:, 0] + x = self.input_projection(x).permute([0, 2, 1]) # [B, T, residual_channel] + diffusion_step = self.diffusion_embedding(diffusion_step) + diffusion_step = self.mlp(diffusion_step) # [B, dim] + cond = cond.permute([0, 2, 1]) # [B, T, M] + + seq_len = cond.shape[1] # [T_mel] + time_embed = diffusion_step[:, None, :] # [B, 1, dim] + time_embed = time_embed.repeat([1, seq_len, 1]) # # [B, T, dim] + + decoder_inp = torch.cat([x, cond, time_embed], dim=-1) # [B, T, dim + H + dim] + decoder_inp = self.get_decode_inp(decoder_inp) # [B, T, H] + x = decoder_inp + + ''' + Required x: [B, T, C] + :return: [B, T, C] or [L, B, T, C] + ''' + padding_mask = x.abs().sum(-1).eq(0).data if padding_mask is None else padding_mask + nonpadding_mask_TB = 1 - padding_mask.transpose(0, 1).float()[:, :, None] # [T, B, 1] + if self.use_pos_embed: + positions = self.pos_embed_alpha * self.embed_positions(x[..., 0]) + x = x + positions + x = F.dropout(x, p=self.dropout, training=self.training) + # B x T x C -> T x B x C + x = x.transpose(0, 1) * nonpadding_mask_TB + hiddens = [] + for layer in self.layers: + x = layer(x, encoder_padding_mask=padding_mask, attn_mask=attn_mask) * nonpadding_mask_TB + hiddens.append(x) + if self.use_last_norm: + x = self.layer_norm(x) * nonpadding_mask_TB + if return_hiddens: + x = torch.stack(hiddens, 0) # [L, T, B, C] + x = x.transpose(1, 2) # [L, B, T, C] + else: + x = x.transpose(0, 1) # [B, T, C] + + x = self.get_mel_out(x).permute([0, 2, 1]) # [B, 80, T] + return x[:, None, :, :] \ No newline at end of file diff --git a/NeuralSeq/modules/diff/diffusion.py b/NeuralSeq/modules/diff/diffusion.py new file mode 100644 index 0000000000000000000000000000000000000000..c30976ab258feff830c2fa1a2d70876cb1d76eda --- /dev/null +++ b/NeuralSeq/modules/diff/diffusion.py @@ -0,0 
+1,334 @@ +import math +import random +from functools import partial +from inspect import isfunction +from pathlib import Path +import numpy as np +import torch +import torch.nn.functional as F +from torch import nn +from tqdm import tqdm +from einops import rearrange + +from modules.fastspeech.fs2 import FastSpeech2 +from modules.diffsinger_midi.fs2 import FastSpeech2MIDI +from utils.hparams import hparams + + + +def exists(x): + return x is not None + + +def default(val, d): + if exists(val): + return val + return d() if isfunction(d) else d + + +def cycle(dl): + while True: + for data in dl: + yield data + + +def num_to_groups(num, divisor): + groups = num // divisor + remainder = num % divisor + arr = [divisor] * groups + if remainder > 0: + arr.append(remainder) + return arr + + +class Residual(nn.Module): + def __init__(self, fn): + super().__init__() + self.fn = fn + + def forward(self, x, *args, **kwargs): + return self.fn(x, *args, **kwargs) + x + + +class SinusoidalPosEmb(nn.Module): + def __init__(self, dim): + super().__init__() + self.dim = dim + + def forward(self, x): + device = x.device + half_dim = self.dim // 2 + emb = math.log(10000) / (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, device=device) * -emb) + emb = x[:, None] * emb[None, :] + emb = torch.cat((emb.sin(), emb.cos()), dim=-1) + return emb + + +class Mish(nn.Module): + def forward(self, x): + return x * torch.tanh(F.softplus(x)) + + +class Upsample(nn.Module): + def __init__(self, dim): + super().__init__() + self.conv = nn.ConvTranspose2d(dim, dim, 4, 2, 1) + + def forward(self, x): + return self.conv(x) + + +class Downsample(nn.Module): + def __init__(self, dim): + super().__init__() + self.conv = nn.Conv2d(dim, dim, 3, 2, 1) + + def forward(self, x): + return self.conv(x) + + +class Rezero(nn.Module): + def __init__(self, fn): + super().__init__() + self.fn = fn + self.g = nn.Parameter(torch.zeros(1)) + + def forward(self, x): + return self.fn(x) * self.g + + +# building block modules + +class Block(nn.Module): + def __init__(self, dim, dim_out, groups=8): + super().__init__() + self.block = nn.Sequential( + nn.Conv2d(dim, dim_out, 3, padding=1), + nn.GroupNorm(groups, dim_out), + Mish() + ) + + def forward(self, x): + return self.block(x) + + +class ResnetBlock(nn.Module): + def __init__(self, dim, dim_out, *, time_emb_dim, groups=8): + super().__init__() + self.mlp = nn.Sequential( + Mish(), + nn.Linear(time_emb_dim, dim_out) + ) + + self.block1 = Block(dim, dim_out) + self.block2 = Block(dim_out, dim_out) + self.res_conv = nn.Conv2d(dim, dim_out, 1) if dim != dim_out else nn.Identity() + + def forward(self, x, time_emb): + h = self.block1(x) + h += self.mlp(time_emb)[:, :, None, None] + h = self.block2(h) + return h + self.res_conv(x) + + +class LinearAttention(nn.Module): + def __init__(self, dim, heads=4, dim_head=32): + super().__init__() + self.heads = heads + hidden_dim = dim_head * heads + self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias=False) + self.to_out = nn.Conv2d(hidden_dim, dim, 1) + + def forward(self, x): + b, c, h, w = x.shape + qkv = self.to_qkv(x) + q, k, v = rearrange(qkv, 'b (qkv heads c) h w -> qkv b heads c (h w)', heads=self.heads, qkv=3) + k = k.softmax(dim=-1) + context = torch.einsum('bhdn,bhen->bhde', k, v) + out = torch.einsum('bhde,bhdn->bhen', context, q) + out = rearrange(out, 'b heads c (h w) -> b (heads c) h w', heads=self.heads, h=h, w=w) + return self.to_out(out) + + +# gaussian diffusion trainer class + +def extract(a, t, x_shape): + b, *_ = t.shape + out 
= a.gather(-1, t) + return out.reshape(b, *((1,) * (len(x_shape) - 1))) + + +def noise_like(shape, device, repeat=False): + repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1))) + noise = lambda: torch.randn(shape, device=device) + return repeat_noise() if repeat else noise() + + +def cosine_beta_schedule(timesteps, s=0.008): + """ + cosine schedule + as proposed in https://openreview.net/forum?id=-NEXDKk8gZ + """ + steps = timesteps + 1 + x = np.linspace(0, steps, steps) + alphas_cumprod = np.cos(((x / steps) + s) / (1 + s) * np.pi * 0.5) ** 2 + alphas_cumprod = alphas_cumprod / alphas_cumprod[0] + betas = 1 - (alphas_cumprod[1:] / alphas_cumprod[:-1]) + return np.clip(betas, a_min=0, a_max=0.999) + + +class GaussianDiffusion(nn.Module): + def __init__(self, phone_encoder, out_dims, denoise_fn, + timesteps=1000, loss_type='l1', betas=None, spec_min=None, spec_max=None): + super().__init__() + self.denoise_fn = denoise_fn + if hparams.get('use_midi') is not None and hparams['use_midi']: + self.fs2 = FastSpeech2MIDI(phone_encoder, out_dims) + else: + self.fs2 = FastSpeech2(phone_encoder, out_dims) + self.fs2.decoder = None + self.mel_bins = out_dims + + if exists(betas): + betas = betas.detach().cpu().numpy() if isinstance(betas, torch.Tensor) else betas + else: + betas = cosine_beta_schedule(timesteps) + + alphas = 1. - betas + alphas_cumprod = np.cumprod(alphas, axis=0) + alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) + + timesteps, = betas.shape + self.num_timesteps = int(timesteps) + self.loss_type = loss_type + + to_torch = partial(torch.tensor, dtype=torch.float32) + + self.register_buffer('betas', to_torch(betas)) + self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) + self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) + + # calculations for diffusion q(x_t | x_{t-1}) and others + self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) + self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) + self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) + self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) + self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) + + # calculations for posterior q(x_{t-1} | x_t, x_0) + posterior_variance = betas * (1. - alphas_cumprod_prev) / (1. - alphas_cumprod) + # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) + self.register_buffer('posterior_variance', to_torch(posterior_variance)) + # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain + self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) + self.register_buffer('posterior_mean_coef1', to_torch( + betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) + self.register_buffer('posterior_mean_coef2', to_torch( + (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) + + self.register_buffer('spec_min', torch.FloatTensor(spec_min)[None, None, :hparams['keep_bins']]) + self.register_buffer('spec_max', torch.FloatTensor(spec_max)[None, None, :hparams['keep_bins']]) + + def q_mean_variance(self, x_start, t): + mean = extract(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + variance = extract(1. 
- self.alphas_cumprod, t, x_start.shape) + log_variance = extract(self.log_one_minus_alphas_cumprod, t, x_start.shape) + return mean, variance, log_variance + + def predict_start_from_noise(self, x_t, t, noise): + return ( + extract(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - + extract(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise + ) + + def q_posterior(self, x_start, x_t, t): + posterior_mean = ( + extract(self.posterior_mean_coef1, t, x_t.shape) * x_start + + extract(self.posterior_mean_coef2, t, x_t.shape) * x_t + ) + posterior_variance = extract(self.posterior_variance, t, x_t.shape) + posterior_log_variance_clipped = extract(self.posterior_log_variance_clipped, t, x_t.shape) + return posterior_mean, posterior_variance, posterior_log_variance_clipped + + def p_mean_variance(self, x, t, cond, clip_denoised: bool): + noise_pred = self.denoise_fn(x, t, cond=cond) + x_recon = self.predict_start_from_noise(x, t=t, noise=noise_pred) + + if clip_denoised: + x_recon.clamp_(-1., 1.) + + model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) + return model_mean, posterior_variance, posterior_log_variance + + @torch.no_grad() + def p_sample(self, x, t, cond, clip_denoised=True, repeat_noise=False): + b, *_, device = *x.shape, x.device + model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, cond=cond, clip_denoised=clip_denoised) + noise = noise_like(x.shape, device, repeat_noise) + # no noise when t == 0 + nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) + return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise + + def q_sample(self, x_start, t, noise=None): + noise = default(noise, lambda: torch.randn_like(x_start)) + return ( + extract(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + + extract(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise + ) + + def p_losses(self, x_start, t, cond, noise=None, nonpadding=None): + noise = default(noise, lambda: torch.randn_like(x_start)) + + x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) + x_recon = self.denoise_fn(x_noisy, t, cond) + + if self.loss_type == 'l1': + if nonpadding is not None: + loss = ((noise - x_recon).abs() * nonpadding.unsqueeze(1)).mean() + else: + # print('are you sure w/o nonpadding?') + loss = (noise - x_recon).abs().mean() + + elif self.loss_type == 'l2': + loss = F.mse_loss(noise, x_recon) + else: + raise NotImplementedError() + + return loss + + def forward(self, txt_tokens, mel2ph=None, spk_embed=None, + ref_mels=None, f0=None, uv=None, energy=None, infer=False): + b, *_, device = *txt_tokens.shape, txt_tokens.device + ret = self.fs2(txt_tokens, mel2ph, spk_embed, ref_mels, f0, uv, energy, + skip_decoder=True, infer=infer) + cond = ret['decoder_inp'].transpose(1, 2) + if not infer: + t = torch.randint(0, self.num_timesteps, (b,), device=device).long() + x = ref_mels + x = self.norm_spec(x) + x = x.transpose(1, 2)[:, None, :, :] # [B, 1, M, T] + nonpadding = (mel2ph != 0).float() + ret['diff_loss'] = self.p_losses(x, t, cond, nonpadding=nonpadding) + else: + t = self.num_timesteps + shape = (cond.shape[0], 1, self.mel_bins, cond.shape[2]) + x = torch.randn(shape, device=device) + for i in tqdm(reversed(range(0, t)), desc='sample time step', total=t): + x = self.p_sample(x, torch.full((b,), i, device=device, dtype=torch.long), cond) + x = x[:, 0].transpose(1, 2) + ret['mel_out'] = self.denorm_spec(x) + + return ret + + def norm_spec(self, x): + return (x - self.spec_min) / 
(self.spec_max - self.spec_min) * 2 - 1 + + def denorm_spec(self, x): + return (x + 1) / 2 * (self.spec_max - self.spec_min) + self.spec_min + + def cwt2f0_norm(self, cwt_spec, mean, std, mel2ph): + return self.fs2.cwt2f0_norm(cwt_spec, mean, std, mel2ph) + + def out2mel(self, x): + return x diff --git a/NeuralSeq/modules/diff/net.py b/NeuralSeq/modules/diff/net.py new file mode 100644 index 0000000000000000000000000000000000000000..b8811115eafb4f27165cf4d89c67c0d9455aac9d --- /dev/null +++ b/NeuralSeq/modules/diff/net.py @@ -0,0 +1,130 @@ +import math + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from math import sqrt + +from .diffusion import Mish +from utils.hparams import hparams + +Linear = nn.Linear +ConvTranspose2d = nn.ConvTranspose2d + + +class AttrDict(dict): + def __init__(self, *args, **kwargs): + super(AttrDict, self).__init__(*args, **kwargs) + self.__dict__ = self + + def override(self, attrs): + if isinstance(attrs, dict): + self.__dict__.update(**attrs) + elif isinstance(attrs, (list, tuple, set)): + for attr in attrs: + self.override(attr) + elif attrs is not None: + raise NotImplementedError + return self + + +class SinusoidalPosEmb(nn.Module): + def __init__(self, dim): + super().__init__() + self.dim = dim + + def forward(self, x): + device = x.device + half_dim = self.dim // 2 + emb = math.log(10000) / (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, device=device) * -emb) + emb = x[:, None] * emb[None, :] + emb = torch.cat((emb.sin(), emb.cos()), dim=-1) + return emb + + +def Conv1d(*args, **kwargs): + layer = nn.Conv1d(*args, **kwargs) + nn.init.kaiming_normal_(layer.weight) + return layer + + +@torch.jit.script +def silu(x): + return x * torch.sigmoid(x) + + +class ResidualBlock(nn.Module): + def __init__(self, encoder_hidden, residual_channels, dilation): + super().__init__() + self.dilated_conv = Conv1d(residual_channels, 2 * residual_channels, 3, padding=dilation, dilation=dilation) + self.diffusion_projection = Linear(residual_channels, residual_channels) + self.conditioner_projection = Conv1d(encoder_hidden, 2 * residual_channels, 1) + self.output_projection = Conv1d(residual_channels, 2 * residual_channels, 1) + + def forward(self, x, conditioner, diffusion_step): + diffusion_step = self.diffusion_projection(diffusion_step).unsqueeze(-1) + conditioner = self.conditioner_projection(conditioner) + y = x + diffusion_step + + y = self.dilated_conv(y) + conditioner + + gate, filter = torch.chunk(y, 2, dim=1) + y = torch.sigmoid(gate) * torch.tanh(filter) + + y = self.output_projection(y) + residual, skip = torch.chunk(y, 2, dim=1) + return (x + residual) / sqrt(2.0), skip + + +class DiffNet(nn.Module): + def __init__(self, in_dims=80): + super().__init__() + self.params = params = AttrDict( + # Model params + encoder_hidden=hparams['hidden_size'], + residual_layers=hparams['residual_layers'], + residual_channels=hparams['residual_channels'], + dilation_cycle_length=hparams['dilation_cycle_length'], + ) + self.input_projection = Conv1d(in_dims, params.residual_channels, 1) + self.diffusion_embedding = SinusoidalPosEmb(params.residual_channels) + dim = params.residual_channels + self.mlp = nn.Sequential( + nn.Linear(dim, dim * 4), + Mish(), + nn.Linear(dim * 4, dim) + ) + self.residual_layers = nn.ModuleList([ + ResidualBlock(params.encoder_hidden, params.residual_channels, 2 ** (i % params.dilation_cycle_length)) + for i in range(params.residual_layers) + ]) + self.skip_projection = Conv1d(params.residual_channels, 
params.residual_channels, 1) + self.output_projection = Conv1d(params.residual_channels, in_dims, 1) + nn.init.zeros_(self.output_projection.weight) + + def forward(self, spec, diffusion_step, cond): + """ + + :param spec: [B, 1, M, T] + :param diffusion_step: [B, 1] + :param cond: [B, M, T] + :return: + """ + x = spec[:, 0] + x = self.input_projection(x) # x [B, residual_channel, T] + + x = F.relu(x) + diffusion_step = self.diffusion_embedding(diffusion_step) + diffusion_step = self.mlp(diffusion_step) + skip = [] + for layer_id, layer in enumerate(self.residual_layers): + x, skip_connection = layer(x, cond, diffusion_step) + skip.append(skip_connection) + + x = torch.sum(torch.stack(skip), dim=0) / sqrt(len(self.residual_layers)) + x = self.skip_projection(x) + x = F.relu(x) + x = self.output_projection(x) # [B, 80, T] + return x[:, None, :, :] diff --git a/NeuralSeq/modules/diff/shallow_diffusion_tts.py b/NeuralSeq/modules/diff/shallow_diffusion_tts.py new file mode 100644 index 0000000000000000000000000000000000000000..8295d48ea0028dd0b3fdf1315bb8f129d0070810 --- /dev/null +++ b/NeuralSeq/modules/diff/shallow_diffusion_tts.py @@ -0,0 +1,324 @@ +import math +import random +from collections import deque +from functools import partial +from inspect import isfunction +from pathlib import Path +import numpy as np +import torch +import torch.nn.functional as F +from torch import nn +from tqdm import tqdm +from einops import rearrange + +from modules.fastspeech.fs2 import FastSpeech2 +from modules.diffsinger_midi.fs2 import FastSpeech2MIDI +from utils.hparams import hparams + + + +def exists(x): + return x is not None + + +def default(val, d): + if exists(val): + return val + return d() if isfunction(d) else d + + +# gaussian diffusion trainer class + +def extract(a, t, x_shape): + b, *_ = t.shape + out = a.gather(-1, t) + return out.reshape(b, *((1,) * (len(x_shape) - 1))) + + +def noise_like(shape, device, repeat=False): + repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1))) + noise = lambda: torch.randn(shape, device=device) + return repeat_noise() if repeat else noise() + + +def linear_beta_schedule(timesteps, max_beta=hparams.get('max_beta', 0.01)): + """ + linear schedule + """ + betas = np.linspace(1e-4, max_beta, timesteps) + return betas + + +def cosine_beta_schedule(timesteps, s=0.008): + """ + cosine schedule + as proposed in https://openreview.net/forum?id=-NEXDKk8gZ + """ + steps = timesteps + 1 + x = np.linspace(0, steps, steps) + alphas_cumprod = np.cos(((x / steps) + s) / (1 + s) * np.pi * 0.5) ** 2 + alphas_cumprod = alphas_cumprod / alphas_cumprod[0] + betas = 1 - (alphas_cumprod[1:] / alphas_cumprod[:-1]) + return np.clip(betas, a_min=0, a_max=0.999) + + +beta_schedule = { + "cosine": cosine_beta_schedule, + "linear": linear_beta_schedule, +} + + +class GaussianDiffusion(nn.Module): + def __init__(self, phone_encoder, out_dims, denoise_fn, + timesteps=1000, K_step=1000, loss_type=hparams.get('diff_loss_type', 'l1'), betas=None, spec_min=None, spec_max=None): + super().__init__() + self.denoise_fn = denoise_fn + if hparams.get('use_midi') is not None and hparams['use_midi']: + self.fs2 = FastSpeech2MIDI(phone_encoder, out_dims) + else: + self.fs2 = FastSpeech2(phone_encoder, out_dims) + self.mel_bins = out_dims + + if exists(betas): + betas = betas.detach().cpu().numpy() if isinstance(betas, torch.Tensor) else betas + else: + if 'schedule_type' in hparams.keys(): + betas = 
beta_schedule[hparams['schedule_type']](timesteps) + else: + betas = cosine_beta_schedule(timesteps) + + alphas = 1. - betas + alphas_cumprod = np.cumprod(alphas, axis=0) + alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) + + timesteps, = betas.shape + self.num_timesteps = int(timesteps) + self.K_step = K_step + self.loss_type = loss_type + + self.noise_list = deque(maxlen=4) + + to_torch = partial(torch.tensor, dtype=torch.float32) + + self.register_buffer('betas', to_torch(betas)) + self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) + self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) + + # calculations for diffusion q(x_t | x_{t-1}) and others + self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) + self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) + self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) + self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) + self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) + + # calculations for posterior q(x_{t-1} | x_t, x_0) + posterior_variance = betas * (1. - alphas_cumprod_prev) / (1. - alphas_cumprod) + # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) + self.register_buffer('posterior_variance', to_torch(posterior_variance)) + # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain + self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) + self.register_buffer('posterior_mean_coef1', to_torch( + betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) + self.register_buffer('posterior_mean_coef2', to_torch( + (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) + + self.register_buffer('spec_min', torch.FloatTensor(spec_min)[None, None, :hparams['keep_bins']]) + self.register_buffer('spec_max', torch.FloatTensor(spec_max)[None, None, :hparams['keep_bins']]) + + def q_mean_variance(self, x_start, t): + mean = extract(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + variance = extract(1. - self.alphas_cumprod, t, x_start.shape) + log_variance = extract(self.log_one_minus_alphas_cumprod, t, x_start.shape) + return mean, variance, log_variance + + def predict_start_from_noise(self, x_t, t, noise): + return ( + extract(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - + extract(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise + ) + + def q_posterior(self, x_start, x_t, t): + posterior_mean = ( + extract(self.posterior_mean_coef1, t, x_t.shape) * x_start + + extract(self.posterior_mean_coef2, t, x_t.shape) * x_t + ) + posterior_variance = extract(self.posterior_variance, t, x_t.shape) + posterior_log_variance_clipped = extract(self.posterior_log_variance_clipped, t, x_t.shape) + return posterior_mean, posterior_variance, posterior_log_variance_clipped + + def p_mean_variance(self, x, t, cond, clip_denoised: bool): + noise_pred = self.denoise_fn(x, t, cond=cond) + x_recon = self.predict_start_from_noise(x, t=t, noise=noise_pred) + + if clip_denoised: + x_recon.clamp_(-1., 1.) 
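# Editor's note, a hedged reading of the step below: clamping x_recon to [-1, 1] is
# consistent with norm_spec(), which maps mel values into that range. The reverse
# transition p(x_{t-1} | x_t) is then approximated by the closed-form Gaussian posterior
# q(x_{t-1} | x_t, x_0) with x_recon substituted for x_0, i.e.
#     mean = posterior_mean_coef1[t] * x_recon + posterior_mean_coef2[t] * x_t,
# where extract(a, t, shape) gathers the per-timestep coefficient a[t] and reshapes it
# to broadcast over the [B, 1, M, T] spectrogram tensor.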
+ + model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) + return model_mean, posterior_variance, posterior_log_variance + + @torch.no_grad() + def p_sample(self, x, t, cond, clip_denoised=True, repeat_noise=False): + b, *_, device = *x.shape, x.device + model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, cond=cond, clip_denoised=clip_denoised) + noise = noise_like(x.shape, device, repeat_noise) + # no noise when t == 0 + nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) + return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise + + @torch.no_grad() + def p_sample_plms(self, x, t, interval, cond, clip_denoised=True, repeat_noise=False): + """ + Use the PLMS method from [Pseudo Numerical Methods for Diffusion Models on Manifolds](https://arxiv.org/abs/2202.09778). + """ + + def get_x_pred(x, noise_t, t): + a_t = extract(self.alphas_cumprod, t, x.shape) + if t[0] < interval: + a_prev = torch.ones_like(a_t) + else: + a_prev = extract(self.alphas_cumprod, torch.max(t-interval, torch.zeros_like(t)), x.shape) + a_t_sq, a_prev_sq = a_t.sqrt(), a_prev.sqrt() + + x_delta = (a_prev - a_t) * ((1 / (a_t_sq * (a_t_sq + a_prev_sq))) * x - 1 / (a_t_sq * (((1 - a_prev) * a_t).sqrt() + ((1 - a_t) * a_prev).sqrt())) * noise_t) + x_pred = x + x_delta + + return x_pred + + noise_list = self.noise_list + noise_pred = self.denoise_fn(x, t, cond=cond) + + if len(noise_list) == 0: + x_pred = get_x_pred(x, noise_pred, t) + noise_pred_prev = self.denoise_fn(x_pred, max(t-interval, 0), cond=cond) + noise_pred_prime = (noise_pred + noise_pred_prev) / 2 + elif len(noise_list) == 1: + noise_pred_prime = (3 * noise_pred - noise_list[-1]) / 2 + elif len(noise_list) == 2: + noise_pred_prime = (23 * noise_pred - 16 * noise_list[-1] + 5 * noise_list[-2]) / 12 + elif len(noise_list) >= 3: + noise_pred_prime = (55 * noise_pred - 59 * noise_list[-1] + 37 * noise_list[-2] - 9 * noise_list[-3]) / 24 + + x_prev = get_x_pred(x, noise_pred_prime, t) + noise_list.append(noise_pred) + + return x_prev + + def q_sample(self, x_start, t, noise=None): + noise = default(noise, lambda: torch.randn_like(x_start)) + return ( + extract(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + + extract(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise + ) + + def p_losses(self, x_start, t, cond, noise=None, nonpadding=None): + noise = default(noise, lambda: torch.randn_like(x_start)) + + x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) + x_recon = self.denoise_fn(x_noisy, t, cond) + + if self.loss_type == 'l1': + if nonpadding is not None: + loss = ((noise - x_recon).abs() * nonpadding.unsqueeze(1)).mean() + else: + # print('are you sure w/o nonpadding?') + loss = (noise - x_recon).abs().mean() + + elif self.loss_type == 'l2': + loss = F.mse_loss(noise, x_recon) + else: + raise NotImplementedError() + + return loss + + def forward(self, txt_tokens, mel2ph=None, spk_embed=None, + ref_mels=None, f0=None, uv=None, energy=None, infer=False, **kwargs): + b, *_, device = *txt_tokens.shape, txt_tokens.device + ret = self.fs2(txt_tokens, mel2ph, spk_embed, ref_mels, f0, uv, energy, + skip_decoder=(not infer), infer=infer, **kwargs) + cond = ret['decoder_inp'].transpose(1, 2) + + if not infer: + t = torch.randint(0, self.K_step, (b,), device=device).long() + x = ref_mels + x = self.norm_spec(x) + x = x.transpose(1, 2)[:, None, :, :] # [B, 1, M, T] + ret['diff_loss'] = self.p_losses(x, t, cond) + # nonpadding = (mel2ph != 
0).float() + # ret['diff_loss'] = self.p_losses(x, t, cond, nonpadding=nonpadding) + else: + ret['fs2_mel'] = ret['mel_out'] + fs2_mels = ret['mel_out'] + t = self.K_step + fs2_mels = self.norm_spec(fs2_mels) + fs2_mels = fs2_mels.transpose(1, 2)[:, None, :, :] + + x = self.q_sample(x_start=fs2_mels, t=torch.tensor([t - 1], device=device).long()) + if hparams.get('gaussian_start') is not None and hparams['gaussian_start']: + print('===> gaussion start.') + shape = (cond.shape[0], 1, self.mel_bins, cond.shape[2]) + x = torch.randn(shape, device=device) + + if hparams.get('pndm_speedup'): + print('===> pndm speed:', hparams['pndm_speedup']) + self.noise_list = deque(maxlen=4) + iteration_interval = hparams['pndm_speedup'] + for i in tqdm(reversed(range(0, t, iteration_interval)), desc='sample time step', + total=t // iteration_interval): + x = self.p_sample_plms(x, torch.full((b,), i, device=device, dtype=torch.long), iteration_interval, + cond) + else: + for i in tqdm(reversed(range(0, t)), desc='sample time step', total=t): + x = self.p_sample(x, torch.full((b,), i, device=device, dtype=torch.long), cond) + x = x[:, 0].transpose(1, 2) + if mel2ph is not None: # for singing + ret['mel_out'] = self.denorm_spec(x) * ((mel2ph > 0).float()[:, :, None]) + else: + ret['mel_out'] = self.denorm_spec(x) + return ret + + def norm_spec(self, x): + return (x - self.spec_min) / (self.spec_max - self.spec_min) * 2 - 1 + + def denorm_spec(self, x): + return (x + 1) / 2 * (self.spec_max - self.spec_min) + self.spec_min + + def cwt2f0_norm(self, cwt_spec, mean, std, mel2ph): + return self.fs2.cwt2f0_norm(cwt_spec, mean, std, mel2ph) + + def out2mel(self, x): + return x + + +class OfflineGaussianDiffusion(GaussianDiffusion): + def forward(self, txt_tokens, mel2ph=None, spk_embed=None, + ref_mels=None, f0=None, uv=None, energy=None, infer=False, **kwargs): + b, *_, device = *txt_tokens.shape, txt_tokens.device + + ret = self.fs2(txt_tokens, mel2ph, spk_embed, ref_mels, f0, uv, energy, + skip_decoder=True, infer=True, **kwargs) + cond = ret['decoder_inp'].transpose(1, 2) + fs2_mels = ref_mels[1] + ref_mels = ref_mels[0] + + if not infer: + t = torch.randint(0, self.K_step, (b,), device=device).long() + x = ref_mels + x = self.norm_spec(x) + x = x.transpose(1, 2)[:, None, :, :] # [B, 1, M, T] + ret['diff_loss'] = self.p_losses(x, t, cond) + else: + t = self.K_step + fs2_mels = self.norm_spec(fs2_mels) + fs2_mels = fs2_mels.transpose(1, 2)[:, None, :, :] + + x = self.q_sample(x_start=fs2_mels, t=torch.tensor([t - 1], device=device).long()) + + if hparams.get('gaussian_start') is not None and hparams['gaussian_start']: + print('===> gaussion start.') + shape = (cond.shape[0], 1, self.mel_bins, cond.shape[2]) + x = torch.randn(shape, device=device) + for i in tqdm(reversed(range(0, t)), desc='sample time step', total=t): + x = self.p_sample(x, torch.full((b,), i, device=device, dtype=torch.long), cond) + x = x[:, 0].transpose(1, 2) + ret['mel_out'] = self.denorm_spec(x) + return ret diff --git a/NeuralSeq/modules/diffsinger_midi/__pycache__/fs2.cpython-37.pyc b/NeuralSeq/modules/diffsinger_midi/__pycache__/fs2.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5752eddef5ef5e9c3198af1abf195793961e61e4 Binary files /dev/null and b/NeuralSeq/modules/diffsinger_midi/__pycache__/fs2.cpython-37.pyc differ diff --git a/NeuralSeq/modules/diffsinger_midi/__pycache__/fs2.cpython-38.pyc b/NeuralSeq/modules/diffsinger_midi/__pycache__/fs2.cpython-38.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..93e6f773fcc23e4033f61fbb39be7d96b297c529 Binary files /dev/null and b/NeuralSeq/modules/diffsinger_midi/__pycache__/fs2.cpython-38.pyc differ diff --git a/NeuralSeq/modules/diffsinger_midi/fs2.py b/NeuralSeq/modules/diffsinger_midi/fs2.py new file mode 100644 index 0000000000000000000000000000000000000000..94a9ec3c51cd749576a5f60caec5e5b08d2a7d02 --- /dev/null +++ b/NeuralSeq/modules/diffsinger_midi/fs2.py @@ -0,0 +1,119 @@ +from modules.commons.common_layers import * +from modules.commons.common_layers import Embedding +from modules.fastspeech.tts_modules import FastspeechDecoder, DurationPredictor, LengthRegulator, PitchPredictor, \ + EnergyPredictor, FastspeechEncoder +from utils.cwt import cwt2f0 +from utils.hparams import hparams +from utils.pitch_utils import f0_to_coarse, denorm_f0, norm_f0 +from modules.fastspeech.fs2 import FastSpeech2 + + +class FastspeechMIDIEncoder(FastspeechEncoder): + def forward_embedding(self, txt_tokens, midi_embedding, midi_dur_embedding, slur_embedding): + # embed tokens and positions + x = self.embed_scale * self.embed_tokens(txt_tokens) + x = x + midi_embedding + midi_dur_embedding + slur_embedding + if hparams['use_pos_embed']: + if hparams.get('rel_pos') is not None and hparams['rel_pos']: + x = self.embed_positions(x) + else: + positions = self.embed_positions(txt_tokens) + x = x + positions + x = F.dropout(x, p=self.dropout, training=self.training) + return x + + def forward(self, txt_tokens, midi_embedding, midi_dur_embedding, slur_embedding): + """ + + :param txt_tokens: [B, T] + :return: { + 'encoder_out': [T x B x C] + } + """ + encoder_padding_mask = txt_tokens.eq(self.padding_idx).data + x = self.forward_embedding(txt_tokens, midi_embedding, midi_dur_embedding, slur_embedding) # [B, T, H] + x = super(FastspeechEncoder, self).forward(x, encoder_padding_mask) + return x + + +FS_ENCODERS = { + 'fft': lambda hp, embed_tokens, d: FastspeechMIDIEncoder( + embed_tokens, hp['hidden_size'], hp['enc_layers'], hp['enc_ffn_kernel_size'], + num_heads=hp['num_heads']), +} + + +class FastSpeech2MIDI(FastSpeech2): + def __init__(self, dictionary, out_dims=None): + super().__init__(dictionary, out_dims) + del self.encoder + self.encoder = FS_ENCODERS[hparams['encoder_type']](hparams, self.encoder_embed_tokens, self.dictionary) + self.midi_embed = Embedding(300, self.hidden_size, self.padding_idx) + self.midi_dur_layer = Linear(1, self.hidden_size) + self.is_slur_embed = Embedding(2, self.hidden_size) + + def forward(self, txt_tokens, mel2ph=None, spk_embed=None, + ref_mels=None, f0=None, uv=None, energy=None, skip_decoder=False, + spk_embed_dur_id=None, spk_embed_f0_id=None, infer=False, **kwargs): + ret = {} + + midi_embedding = self.midi_embed(kwargs['pitch_midi']) + midi_dur_embedding, slur_embedding = 0, 0 + if kwargs.get('midi_dur') is not None: + midi_dur_embedding = self.midi_dur_layer(kwargs['midi_dur'][:, :, None]) # [B, T, 1] -> [B, T, H] + if kwargs.get('is_slur') is not None: + slur_embedding = self.is_slur_embed(kwargs['is_slur']) + encoder_out = self.encoder(txt_tokens, midi_embedding, midi_dur_embedding, slur_embedding) # [B, T, C] + src_nonpadding = (txt_tokens > 0).float()[:, :, None] + + # add ref style embed + # Not implemented + # variance encoder + var_embed = 0 + + # encoder_out_dur denotes encoder outputs for duration predictor + # in speech adaptation, duration predictor use old speaker embedding + if hparams['use_spk_embed']: + spk_embed_dur = spk_embed_f0 = spk_embed = 
self.spk_embed_proj(spk_embed)[:, None, :] + elif hparams['use_spk_id']: + spk_embed_id = spk_embed + if spk_embed_dur_id is None: + spk_embed_dur_id = spk_embed_id + if spk_embed_f0_id is None: + spk_embed_f0_id = spk_embed_id + spk_embed = self.spk_embed_proj(spk_embed_id)[:, None, :] + spk_embed_dur = spk_embed_f0 = spk_embed + if hparams['use_split_spk_id']: + spk_embed_dur = self.spk_embed_dur(spk_embed_dur_id)[:, None, :] + spk_embed_f0 = self.spk_embed_f0(spk_embed_f0_id)[:, None, :] + else: + spk_embed_dur = spk_embed_f0 = spk_embed = 0 + + # add dur + dur_inp = (encoder_out + var_embed + spk_embed_dur) * src_nonpadding + + mel2ph = self.add_dur(dur_inp, mel2ph, txt_tokens, ret) + + decoder_inp = F.pad(encoder_out, [0, 0, 1, 0]) + + mel2ph_ = mel2ph[..., None].repeat([1, 1, encoder_out.shape[-1]]) + decoder_inp_origin = decoder_inp = torch.gather(decoder_inp, 1, mel2ph_) # [B, T, H] + + tgt_nonpadding = (mel2ph > 0).float()[:, :, None] + + # add pitch and energy embed + pitch_inp = (decoder_inp_origin + var_embed + spk_embed_f0) * tgt_nonpadding + if hparams['use_pitch_embed']: + pitch_inp_ph = (encoder_out + var_embed + spk_embed_f0) * src_nonpadding + decoder_inp = decoder_inp + self.add_pitch(pitch_inp, f0, uv, mel2ph, ret, encoder_out=pitch_inp_ph) + if hparams['use_energy_embed']: + decoder_inp = decoder_inp + self.add_energy(pitch_inp, energy, ret) + + ret['decoder_inp'] = decoder_inp = (decoder_inp + spk_embed) * tgt_nonpadding + + if skip_decoder: + return ret + ret['mel_out'] = self.run_decoder(decoder_inp, tgt_nonpadding, ret, infer=infer, **kwargs) + + return ret + diff --git a/NeuralSeq/modules/fastspeech/__pycache__/fs2.cpython-37.pyc b/NeuralSeq/modules/fastspeech/__pycache__/fs2.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f7013175ffa0bb0046d6831dbaa6caacb019a337 Binary files /dev/null and b/NeuralSeq/modules/fastspeech/__pycache__/fs2.cpython-37.pyc differ diff --git a/NeuralSeq/modules/fastspeech/__pycache__/fs2.cpython-38.pyc b/NeuralSeq/modules/fastspeech/__pycache__/fs2.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d434650ddb58aa15b761ed72ab04869f74cb6239 Binary files /dev/null and b/NeuralSeq/modules/fastspeech/__pycache__/fs2.cpython-38.pyc differ diff --git a/NeuralSeq/modules/fastspeech/__pycache__/pe.cpython-37.pyc b/NeuralSeq/modules/fastspeech/__pycache__/pe.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dfe21c5f81ced5271bc33a6097999376692d8bb6 Binary files /dev/null and b/NeuralSeq/modules/fastspeech/__pycache__/pe.cpython-37.pyc differ diff --git a/NeuralSeq/modules/fastspeech/__pycache__/pe.cpython-38.pyc b/NeuralSeq/modules/fastspeech/__pycache__/pe.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5bd42347f6a92f65f7380071896a45096813a082 Binary files /dev/null and b/NeuralSeq/modules/fastspeech/__pycache__/pe.cpython-38.pyc differ diff --git a/NeuralSeq/modules/fastspeech/__pycache__/tts_modules.cpython-37.pyc b/NeuralSeq/modules/fastspeech/__pycache__/tts_modules.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3624241d449eb7586e74ef7079f9ed4d8229de64 Binary files /dev/null and b/NeuralSeq/modules/fastspeech/__pycache__/tts_modules.cpython-37.pyc differ diff --git a/NeuralSeq/modules/fastspeech/__pycache__/tts_modules.cpython-38.pyc b/NeuralSeq/modules/fastspeech/__pycache__/tts_modules.cpython-38.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..6cf7a536618e01a30a436f18020638fafaf5a2ae Binary files /dev/null and b/NeuralSeq/modules/fastspeech/__pycache__/tts_modules.cpython-38.pyc differ diff --git a/NeuralSeq/modules/fastspeech/fs2.py b/NeuralSeq/modules/fastspeech/fs2.py new file mode 100644 index 0000000000000000000000000000000000000000..a4487ee37075e1fc2209698e75b164410c191a18 --- /dev/null +++ b/NeuralSeq/modules/fastspeech/fs2.py @@ -0,0 +1,250 @@ +from utils.hparams import hparams +from modules.commons.common_layers import * +from modules.commons.common_layers import Embedding +from modules.fastspeech.tts_modules import FastspeechDecoder, DurationPredictor, LengthRegulator, PitchPredictor, \ + EnergyPredictor, FastspeechEncoder +from utils.cwt import cwt2f0 +from utils.pitch_utils import f0_to_coarse, denorm_f0, norm_f0 +import torch.nn as nn +from modules.commons.rel_transformer import RelTransformerEncoder, BERTRelTransformerEncoder +FS_ENCODERS = { + 'fft': lambda hp, embed_tokens, d: FastspeechEncoder( + embed_tokens, hp['hidden_size'], hp['enc_layers'], hp['enc_ffn_kernel_size'], + num_heads=hp['num_heads']), +} + +FS_DECODERS = { + 'fft': lambda hp: FastspeechDecoder( + hp['hidden_size'], hp['dec_layers'], hp['dec_ffn_kernel_size'], hp['num_heads']), +} + + +class FastSpeech2(nn.Module): + def __init__(self, dictionary, out_dims=None): + super().__init__() + self.dictionary = dictionary + self.padding_idx = dictionary.pad() + self.enc_layers = hparams['enc_layers'] + self.dec_layers = hparams['dec_layers'] + self.hidden_size = hparams['hidden_size'] + self.encoder_embed_tokens = self.build_embedding(self.dictionary, self.hidden_size) + if hparams.get("use_bert", False): + self.ph_encoder = BERTRelTransformerEncoder(len(self.dictionary), hparams['hidden_size'], hparams['hidden_size'], + hparams['ffn_hidden_size'], hparams['num_heads'], hparams['enc_layers'], + hparams['enc_ffn_kernel_size'], hparams['dropout'], prenet=hparams['enc_prenet'], pre_ln=hparams['enc_pre_ln']) + else: + self.encoder = FS_ENCODERS[hparams['encoder_type']](hparams, self.encoder_embed_tokens, self.dictionary) + self.decoder = FS_DECODERS[hparams['decoder_type']](hparams) + self.out_dims = hparams['audio_num_mel_bins'] if out_dims is None else out_dims + self.mel_out = Linear(self.hidden_size, self.out_dims, bias=True) + + if hparams['use_spk_id']: + self.spk_embed_proj = Embedding(hparams['num_spk'] + 1, self.hidden_size) + if hparams['use_split_spk_id']: + self.spk_embed_f0 = Embedding(hparams['num_spk'] + 1, self.hidden_size) + self.spk_embed_dur = Embedding(hparams['num_spk'] + 1, self.hidden_size) + elif hparams['use_spk_embed']: + self.spk_embed_proj = Linear(256, self.hidden_size, bias=True) + predictor_hidden = hparams['predictor_hidden'] if hparams['predictor_hidden'] > 0 else self.hidden_size + self.dur_predictor = DurationPredictor( + self.hidden_size, + n_chans=predictor_hidden, + n_layers=hparams['dur_predictor_layers'], + dropout_rate=hparams['predictor_dropout'], + kernel_size=hparams['dur_predictor_kernel']) + self.length_regulator = LengthRegulator() + if hparams['use_pitch_embed']: + self.pitch_embed = Embedding(300, self.hidden_size, self.padding_idx) + self.pitch_predictor = PitchPredictor( + self.hidden_size, + n_chans=predictor_hidden, + n_layers=hparams['predictor_layers'], + dropout_rate=hparams['predictor_dropout'], + odim=2 if hparams['pitch_type'] == 'frame' else 1, + kernel_size=hparams['predictor_kernel']) + if hparams.get('use_energy_embed', False): + self.energy_embed = 
Embedding(256, self.hidden_size, self.padding_idx) + self.energy_predictor = EnergyPredictor( + self.hidden_size, + n_chans=predictor_hidden, + n_layers=hparams['predictor_layers'], + dropout_rate=hparams['predictor_dropout'], odim=1, + kernel_size=hparams['predictor_kernel']) + + def build_embedding(self, dictionary, embed_dim): + num_embeddings = len(dictionary) + emb = Embedding(num_embeddings, embed_dim, self.padding_idx) + return emb + + def forward(self, txt_tokens, mel2ph=None, spk_embed=None, + ref_mels=None, f0=None, uv=None, energy=None, skip_decoder=False, + spk_embed_dur_id=None, spk_embed_f0_id=None, infer=False, **kwargs): + ret = {} + if hparams.get("use_bert", False): + encoder_out = self.encoder(txt_tokens, bert_feats=kwargs['bert_feats'], ph2word=kwargs['ph2word'], ret=ret) + else: + encoder_out = self.encoder(txt_tokens) # [B, T, C] + src_nonpadding = (txt_tokens > 0).float()[:, :, None] + + # add ref style embed + # Not implemented + # variance encoder + var_embed = 0 + + # encoder_out_dur denotes encoder outputs for duration predictor + # in speech adaptation, duration predictor use old speaker embedding + if hparams['use_spk_embed']: + spk_embed_dur = spk_embed_f0 = spk_embed = self.spk_embed_proj(spk_embed)[:, None, :] + elif hparams['use_spk_id']: + spk_embed_id = spk_embed + if spk_embed_dur_id is None: + spk_embed_dur_id = spk_embed_id + if spk_embed_f0_id is None: + spk_embed_f0_id = spk_embed_id + spk_embed = self.spk_embed_proj(spk_embed_id)[:, None, :] + spk_embed_dur = spk_embed_f0 = spk_embed + if hparams['use_split_spk_id']: + spk_embed_dur = self.spk_embed_dur(spk_embed_dur_id)[:, None, :] + spk_embed_f0 = self.spk_embed_f0(spk_embed_f0_id)[:, None, :] + else: + spk_embed_dur = spk_embed_f0 = spk_embed = 0 + + # add dur + dur_inp = (encoder_out + var_embed + spk_embed_dur) * src_nonpadding + + mel2ph = self.add_dur(dur_inp, mel2ph, txt_tokens, ret) + + decoder_inp = F.pad(encoder_out, [0, 0, 1, 0]) + + mel2ph_ = mel2ph[..., None].repeat([1, 1, encoder_out.shape[-1]]) + decoder_inp_origin = decoder_inp = torch.gather(decoder_inp, 1, mel2ph_) # [B, T, H] + + tgt_nonpadding = (mel2ph > 0).float()[:, :, None] + + # add pitch and energy embed + pitch_inp = (decoder_inp_origin + var_embed + spk_embed_f0) * tgt_nonpadding + if hparams['use_pitch_embed']: + pitch_inp_ph = (encoder_out + var_embed + spk_embed_f0) * src_nonpadding + decoder_inp = decoder_inp + self.add_pitch(pitch_inp, f0, uv, mel2ph, ret, encoder_out=pitch_inp_ph) + if hparams.get('use_energy_embed', False): + decoder_inp = decoder_inp + self.add_energy(pitch_inp, energy, ret) + + ret['decoder_inp'] = decoder_inp = (decoder_inp + spk_embed) * tgt_nonpadding + + if skip_decoder: + return ret + ret['mel_out'] = self.run_decoder(decoder_inp, tgt_nonpadding, ret, infer=infer, **kwargs) + + return ret + + def add_dur(self, dur_input, mel2ph, txt_tokens, ret): + """ + + :param dur_input: [B, T_txt, H] + :param mel2ph: [B, T_mel] + :param txt_tokens: [B, T_txt] + :param ret: + :return: + """ + src_padding = txt_tokens == 0 + dur_input = dur_input.detach() + hparams['predictor_grad'] * (dur_input - dur_input.detach()) + if mel2ph is None: + dur, xs = self.dur_predictor.inference(dur_input, src_padding) + ret['dur'] = xs + ret['dur_choice'] = dur + mel2ph = self.length_regulator(dur, src_padding).detach() + # from modules.fastspeech.fake_modules import FakeLengthRegulator + # fake_lr = FakeLengthRegulator() + # fake_mel2ph = fake_lr(dur, (1 - src_padding.long()).sum(-1))[..., 0].detach() + # print(mel2ph == 
fake_mel2ph) + else: + ret['dur'] = self.dur_predictor(dur_input, src_padding) + ret['mel2ph'] = mel2ph + return mel2ph + + def add_energy(self, decoder_inp, energy, ret): + decoder_inp = decoder_inp.detach() + hparams['predictor_grad'] * (decoder_inp - decoder_inp.detach()) + ret['energy_pred'] = energy_pred = self.energy_predictor(decoder_inp)[:, :, 0] + if energy is None: + energy = energy_pred + energy = torch.clamp(energy * 256 // 4, max=255).long() + energy_embed = self.energy_embed(energy) + return energy_embed + + def add_pitch(self, decoder_inp, f0, uv, mel2ph, ret, encoder_out=None): + if hparams['pitch_type'] == 'ph': + pitch_pred_inp = encoder_out.detach() + hparams['predictor_grad'] * (encoder_out - encoder_out.detach()) + pitch_padding = encoder_out.sum().abs() == 0 + ret['pitch_pred'] = pitch_pred = self.pitch_predictor(pitch_pred_inp) + if f0 is None: + f0 = pitch_pred[:, :, 0] + ret['f0_denorm'] = f0_denorm = denorm_f0(f0, None, hparams, pitch_padding=pitch_padding) + pitch = f0_to_coarse(f0_denorm) # start from 0 [B, T_txt] + pitch = F.pad(pitch, [1, 0]) + pitch = torch.gather(pitch, 1, mel2ph) # [B, T_mel] + pitch_embed = self.pitch_embed(pitch) + return pitch_embed + decoder_inp = decoder_inp.detach() + hparams['predictor_grad'] * (decoder_inp - decoder_inp.detach()) + + pitch_padding = mel2ph == 0 + + if hparams['pitch_type'] == 'cwt': + pitch_padding = None + ret['cwt'] = cwt_out = self.cwt_predictor(decoder_inp) + stats_out = self.cwt_stats_layers(encoder_out[:, 0, :]) # [B, 2] + mean = ret['f0_mean'] = stats_out[:, 0] + std = ret['f0_std'] = stats_out[:, 1] + cwt_spec = cwt_out[:, :, :10] + if f0 is None: + std = std * hparams['cwt_std_scale'] + f0 = self.cwt2f0_norm(cwt_spec, mean, std, mel2ph) + if hparams['use_uv']: + assert cwt_out.shape[-1] == 11 + uv = cwt_out[:, :, -1] > 0 + elif hparams['pitch_ar']: + ret['pitch_pred'] = pitch_pred = self.pitch_predictor(decoder_inp, f0 if self.training else None) + if f0 is None: + f0 = pitch_pred[:, :, 0] + else: + ret['pitch_pred'] = pitch_pred = self.pitch_predictor(decoder_inp) + if f0 is None: + f0 = pitch_pred[:, :, 0] + if hparams['use_uv'] and uv is None: + uv = pitch_pred[:, :, 1] > 0 + ret['f0_denorm'] = f0_denorm = denorm_f0(f0, uv, hparams, pitch_padding=pitch_padding) + if pitch_padding is not None: + f0[pitch_padding] = 0 + + pitch = f0_to_coarse(f0_denorm) # start from 0 + pitch_embed = self.pitch_embed(pitch) + return pitch_embed + + def run_decoder(self, decoder_inp, tgt_nonpadding, ret, infer, **kwargs): + x = decoder_inp # [B, T, H] + x = self.decoder(x) + x = self.mel_out(x) + return x * tgt_nonpadding + + def cwt2f0_norm(self, cwt_spec, mean, std, mel2ph): + f0 = cwt2f0(cwt_spec, mean, std, hparams['cwt_scales']) + f0 = torch.cat( + [f0] + [f0[:, -1:]] * (mel2ph.shape[1] - f0.shape[1]), 1) + f0_norm = norm_f0(f0, None, hparams) + return f0_norm + + def out2mel(self, out): + return out + + @staticmethod + def mel_norm(x): + return (x + 5.5) / (6.3 / 2) - 1 + + @staticmethod + def mel_denorm(x): + return (x + 1) * (6.3 / 2) - 5.5 + + def expand_states(self, h, mel2ph): + h = F.pad(h, [0, 0, 1, 0]) + mel2ph_ = mel2ph[..., None].repeat([1, 1, h.shape[-1]]) + h = torch.gather(h, 1, mel2ph_) # [B, T, H] + return h diff --git a/NeuralSeq/modules/fastspeech/pe.py b/NeuralSeq/modules/fastspeech/pe.py new file mode 100644 index 0000000000000000000000000000000000000000..d9fa5098b378bb4ed10f97f05a9ff725d1d2239c --- /dev/null +++ b/NeuralSeq/modules/fastspeech/pe.py @@ -0,0 +1,149 @@ +from 
modules.commons.common_layers import * +from utils.hparams import hparams +from modules.fastspeech.tts_modules import PitchPredictor +from utils.pitch_utils import denorm_f0 + + +class Prenet(nn.Module): + def __init__(self, in_dim=80, out_dim=256, kernel=5, n_layers=3, strides=None): + super(Prenet, self).__init__() + padding = kernel // 2 + self.layers = [] + self.strides = strides if strides is not None else [1] * n_layers + for l in range(n_layers): + self.layers.append(nn.Sequential( + nn.Conv1d(in_dim, out_dim, kernel_size=kernel, padding=padding, stride=self.strides[l]), + nn.ReLU(), + nn.BatchNorm1d(out_dim) + )) + in_dim = out_dim + self.layers = nn.ModuleList(self.layers) + self.out_proj = nn.Linear(out_dim, out_dim) + + def forward(self, x): + """ + + :param x: [B, T, 80] + :return: [L, B, T, H], [B, T, H] + """ + padding_mask = x.abs().sum(-1).eq(0).data # [B, T] + nonpadding_mask_TB = 1 - padding_mask.float()[:, None, :] # [B, 1, T] + x = x.transpose(1, 2) + hiddens = [] + for i, l in enumerate(self.layers): + nonpadding_mask_TB = nonpadding_mask_TB[:, :, ::self.strides[i]] + x = l(x) * nonpadding_mask_TB + hiddens.append(x) + hiddens = torch.stack(hiddens, 0) # [L, B, H, T] + hiddens = hiddens.transpose(2, 3) # [L, B, T, H] + x = self.out_proj(x.transpose(1, 2)) # [B, T, H] + x = x * nonpadding_mask_TB.transpose(1, 2) + return hiddens, x + + +class ConvBlock(nn.Module): + def __init__(self, idim=80, n_chans=256, kernel_size=3, stride=1, norm='gn', dropout=0): + super().__init__() + self.conv = ConvNorm(idim, n_chans, kernel_size, stride=stride) + self.norm = norm + if self.norm == 'bn': + self.norm = nn.BatchNorm1d(n_chans) + elif self.norm == 'in': + self.norm = nn.InstanceNorm1d(n_chans, affine=True) + elif self.norm == 'gn': + self.norm = nn.GroupNorm(n_chans // 16, n_chans) + elif self.norm == 'ln': + self.norm = LayerNorm(n_chans // 16, n_chans) + elif self.norm == 'wn': + self.conv = torch.nn.utils.weight_norm(self.conv.conv) + self.dropout = nn.Dropout(dropout) + self.relu = nn.ReLU() + + def forward(self, x): + """ + + :param x: [B, C, T] + :return: [B, C, T] + """ + x = self.conv(x) + if not isinstance(self.norm, str): + if self.norm == 'none': + pass + elif self.norm == 'ln': + x = self.norm(x.transpose(1, 2)).transpose(1, 2) + else: + x = self.norm(x) + x = self.relu(x) + x = self.dropout(x) + return x + + +class ConvStacks(nn.Module): + def __init__(self, idim=80, n_layers=5, n_chans=256, odim=32, kernel_size=5, norm='gn', + dropout=0, strides=None, res=True): + super().__init__() + self.conv = torch.nn.ModuleList() + self.kernel_size = kernel_size + self.res = res + self.in_proj = Linear(idim, n_chans) + if strides is None: + strides = [1] * n_layers + else: + assert len(strides) == n_layers + for idx in range(n_layers): + self.conv.append(ConvBlock( + n_chans, n_chans, kernel_size, stride=strides[idx], norm=norm, dropout=dropout)) + self.out_proj = Linear(n_chans, odim) + + def forward(self, x, return_hiddens=False): + """ + + :param x: [B, T, H] + :return: [B, T, H] + """ + x = self.in_proj(x) + x = x.transpose(1, -1) # (B, idim, Tmax) + hiddens = [] + for f in self.conv: + x_ = f(x) + x = x + x_ if self.res else x_ # (B, C, Tmax) + hiddens.append(x) + x = x.transpose(1, -1) + x = self.out_proj(x) # (B, Tmax, H) + if return_hiddens: + hiddens = torch.stack(hiddens, 1) # [B, L, C, T] + return x, hiddens + return x + + +class PitchExtractor(nn.Module): + def __init__(self, n_mel_bins=80, conv_layers=2): + super().__init__() + self.hidden_size = 
hparams['hidden_size'] + self.predictor_hidden = hparams['predictor_hidden'] if hparams['predictor_hidden'] > 0 else self.hidden_size + self.conv_layers = conv_layers + + self.mel_prenet = Prenet(n_mel_bins, self.hidden_size, strides=[1, 1, 1]) + if self.conv_layers > 0: + self.mel_encoder = ConvStacks( + idim=self.hidden_size, n_chans=self.hidden_size, odim=self.hidden_size, n_layers=self.conv_layers) + self.pitch_predictor = PitchPredictor( + self.hidden_size, n_chans=self.predictor_hidden, + n_layers=5, dropout_rate=0.1, odim=2, + padding=hparams['ffn_padding'], kernel_size=hparams['predictor_kernel']) + + def forward(self, mel_input=None): + ret = {} + mel_hidden = self.mel_prenet(mel_input)[1] + if self.conv_layers > 0: + mel_hidden = self.mel_encoder(mel_hidden) + + ret['pitch_pred'] = pitch_pred = self.pitch_predictor(mel_hidden) + + pitch_padding = mel_input.abs().sum(-1) == 0 + use_uv = hparams['pitch_type'] == 'frame' and hparams['use_uv'] + + ret['f0_denorm_pred'] = denorm_f0( + pitch_pred[:, :, 0], (pitch_pred[:, :, 1] > 0) if use_uv else None, + hparams, pitch_padding=pitch_padding) + return ret \ No newline at end of file diff --git a/NeuralSeq/modules/fastspeech/tts_modules.py b/NeuralSeq/modules/fastspeech/tts_modules.py new file mode 100644 index 0000000000000000000000000000000000000000..2d65438656fe6860ae272042a9631025326edd75 --- /dev/null +++ b/NeuralSeq/modules/fastspeech/tts_modules.py @@ -0,0 +1,385 @@ +import logging +import math + +import torch +import torch.nn as nn +from torch.nn import functional as F + +from modules.commons.espnet_positional_embedding import RelPositionalEncoding +from modules.commons.common_layers import SinusoidalPositionalEmbedding, Linear, EncSALayer, DecSALayer, BatchNorm1dTBC +from utils.hparams import hparams + +DEFAULT_MAX_SOURCE_POSITIONS = 2000 +DEFAULT_MAX_TARGET_POSITIONS = 2000 + + +class TransformerEncoderLayer(nn.Module): + def __init__(self, hidden_size, dropout, kernel_size=None, num_heads=2, norm='ln'): + super().__init__() + self.hidden_size = hidden_size + self.dropout = dropout + self.num_heads = num_heads + self.op = EncSALayer( + hidden_size, num_heads, dropout=dropout, + attention_dropout=0.0, relu_dropout=dropout, + kernel_size=kernel_size + if kernel_size is not None else hparams['enc_ffn_kernel_size'], + padding=hparams['ffn_padding'], + norm=norm, act=hparams['ffn_act']) + + def forward(self, x, **kwargs): + return self.op(x, **kwargs) + + +###################### +# fastspeech modules +###################### +class LayerNorm(torch.nn.LayerNorm): + """Layer normalization module. + :param int nout: output dim size + :param int dim: dimension to be normalized + """ + + def __init__(self, nout, dim=-1, eps=1e-5): + """Construct an LayerNorm object.""" + super(LayerNorm, self).__init__(nout, eps=eps) + self.dim = dim + + def forward(self, x): + """Apply layer normalization. + :param torch.Tensor x: input tensor + :return: layer normalized tensor + :rtype torch.Tensor + """ + if self.dim == -1: + return super(LayerNorm, self).forward(x) + return super(LayerNorm, self).forward(x.transpose(1, -1)).transpose(1, -1) + + +class DurationPredictor(torch.nn.Module): + """Duration predictor module. + This is a module of duration predictor described in `FastSpeech: Fast, Robust and Controllable Text to Speech`_. + The duration predictor predicts a duration of each frame in log domain from the hidden embeddings of encoder. + .. 
_`FastSpeech: Fast, Robust and Controllable Text to Speech`: + https://arxiv.org/pdf/1905.09263.pdf + Note: + The calculation domain of outputs is different between in `forward` and in `inference`. In `forward`, + the outputs are calculated in log domain but in `inference`, those are calculated in linear domain. + """ + + def __init__(self, idim, odims = 1, n_layers=2, n_chans=384, kernel_size=3, dropout_rate=0.1, offset=1.0, padding='SAME'): + """Initilize duration predictor module. + Args: + idim (int): Input dimension. + n_layers (int, optional): Number of convolutional layers. + n_chans (int, optional): Number of channels of convolutional layers. + kernel_size (int, optional): Kernel size of convolutional layers. + dropout_rate (float, optional): Dropout rate. + offset (float, optional): Offset value to avoid nan in log domain. + """ + super(DurationPredictor, self).__init__() + self.offset = offset + self.conv = torch.nn.ModuleList() + self.kernel_size = kernel_size + self.padding = padding + for idx in range(n_layers): + in_chans = idim if idx == 0 else n_chans + self.conv += [torch.nn.Sequential( + torch.nn.ConstantPad1d(((kernel_size - 1) // 2, (kernel_size - 1) // 2) + if padding == 'SAME' + else (kernel_size - 1, 0), 0), + torch.nn.Conv1d(in_chans, n_chans, kernel_size, stride=1, padding=0), + torch.nn.ReLU(), + LayerNorm(n_chans, dim=1), + torch.nn.Dropout(dropout_rate) + )] + self.linear = torch.nn.Linear(n_chans, odims) + + def _forward(self, xs, x_masks=None, is_inference=False): + xs = xs.transpose(1, -1) # (B, idim, Tmax) + for f in self.conv: + xs = f(xs) # (B, C, Tmax) + if x_masks is not None: + xs = xs * (1 - x_masks.float())[:, None, :] + + xs = self.linear(xs.transpose(1, -1)) # [B, T, C] + xs = xs * (1 - x_masks.float())[:, :, None] # (B, T, C) + if is_inference: + return self.out2dur(xs), xs + else: + if hparams['dur_loss'] in ['mse']: + xs = xs.squeeze(-1) # (B, Tmax) + return xs + + def out2dur(self, xs): + if hparams['dur_loss'] in ['mse']: + # NOTE: calculate in log domain + xs = xs.squeeze(-1) # (B, Tmax) + dur = torch.clamp(torch.round(xs.exp() - self.offset), min=0).long() # avoid negative value + elif hparams['dur_loss'] == 'mog': + return NotImplementedError + elif hparams['dur_loss'] == 'crf': + dur = torch.LongTensor(self.crf.decode(xs)).cuda() + return dur + + def forward(self, xs, x_masks=None): + """Calculate forward propagation. + Args: + xs (Tensor): Batch of input sequences (B, Tmax, idim). + x_masks (ByteTensor, optional): Batch of masks indicating padded part (B, Tmax). + Returns: + Tensor: Batch of predicted durations in log domain (B, Tmax). + """ + return self._forward(xs, x_masks, False) + + def inference(self, xs, x_masks=None): + """Inference duration. + Args: + xs (Tensor): Batch of input sequences (B, Tmax, idim). + x_masks (ByteTensor, optional): Batch of masks indicating padded part (B, Tmax). + Returns: + LongTensor: Batch of predicted durations in linear domain (B, Tmax). 
+ """ + return self._forward(xs, x_masks, True) + +class SyntaDurationPredictor(torch.nn.Module): + def __init__(self, idim, n_layers=2, n_chans=384, kernel_size=3, dropout_rate=0.1, offset=1.0): + super(SyntaDurationPredictor, self).__init__() + from modules.syntaspeech.syntactic_graph_encoder import GraphAuxEnc + self.graph_encoder = GraphAuxEnc(in_dim=idim, hid_dim=idim, out_dim=idim) + self.offset = offset + self.conv = torch.nn.ModuleList() + self.kernel_size = kernel_size + for idx in range(n_layers): + in_chans = idim if idx == 0 else n_chans + self.conv += [torch.nn.Sequential( + torch.nn.Conv1d(in_chans, n_chans, kernel_size, stride=1, padding=kernel_size // 2), + torch.nn.ReLU(), + LayerNorm(n_chans, dim=1), + torch.nn.Dropout(dropout_rate) + )] + self.linear = nn.Sequential(torch.nn.Linear(n_chans, 1), nn.Softplus()) + + def forward(self, x, x_padding=None, ph2word=None, graph_lst=None, etypes_lst=None): + x = x.transpose(1, -1) # (B, idim, Tmax) + assert ph2word is not None and graph_lst is not None and etypes_lst is not None + x_graph = self.graph_encoder(graph_lst, x, ph2word, etypes_lst) + x = x + x_graph * 1. + + for f in self.conv: + x = f(x) # (B, C, Tmax) + if x_padding is not None: + x = x * (1 - x_padding.float())[:, None, :] + + x = self.linear(x.transpose(1, -1)) # [B, T, C] + x = x * (1 - x_padding.float())[:, :, None] # (B, T, C) + x = x[..., 0] # (B, Tmax) + return x + +class LengthRegulator(torch.nn.Module): + def __init__(self, pad_value=0.0): + super(LengthRegulator, self).__init__() + self.pad_value = pad_value + + def forward(self, dur, dur_padding=None, alpha=1.0): + """ + Example (no batch dim version): + 1. dur = [2,2,3] + 2. token_idx = [[1],[2],[3]], dur_cumsum = [2,4,7], dur_cumsum_prev = [0,2,4] + 3. token_mask = [[1,1,0,0,0,0,0], + [0,0,1,1,0,0,0], + [0,0,0,0,1,1,1]] + 4. token_idx * token_mask = [[1,1,0,0,0,0,0], + [0,0,2,2,0,0,0], + [0,0,0,0,3,3,3]] + 5. (token_idx * token_mask).sum(0) = [1,1,2,2,3,3,3] + + :param dur: Batch of durations of each frame (B, T_txt) + :param dur_padding: Batch of padding of each frame (B, T_txt) + :param alpha: duration rescale coefficient + :return: + mel2ph (B, T_speech) + """ + assert alpha > 0 + dur = torch.round(dur.float() * alpha).long() + if dur_padding is not None: + dur = dur * (1 - dur_padding.long()) + token_idx = torch.arange(1, dur.shape[1] + 1)[None, :, None].to(dur.device) + dur_cumsum = torch.cumsum(dur, 1) + dur_cumsum_prev = F.pad(dur_cumsum, [1, -1], mode='constant', value=0) + + pos_idx = torch.arange(dur.sum(-1).max())[None, None].to(dur.device) + token_mask = (pos_idx >= dur_cumsum_prev[:, :, None]) & (pos_idx < dur_cumsum[:, :, None]) + mel2ph = (token_idx * token_mask.long()).sum(1) + return mel2ph + + +class PitchPredictor(torch.nn.Module): + def __init__(self, idim, n_layers=5, n_chans=384, odim=2, kernel_size=5, + dropout_rate=0.1, padding='SAME'): + """Initilize pitch predictor module. + Args: + idim (int): Input dimension. + n_layers (int, optional): Number of convolutional layers. + n_chans (int, optional): Number of channels of convolutional layers. + kernel_size (int, optional): Kernel size of convolutional layers. + dropout_rate (float, optional): Dropout rate. 
+ """ + super(PitchPredictor, self).__init__() + self.conv = torch.nn.ModuleList() + self.kernel_size = kernel_size + self.padding = padding + for idx in range(n_layers): + in_chans = idim if idx == 0 else n_chans + self.conv += [torch.nn.Sequential( + torch.nn.ConstantPad1d(((kernel_size - 1) // 2, (kernel_size - 1) // 2) + if padding == 'SAME' + else (kernel_size - 1, 0), 0), + torch.nn.Conv1d(in_chans, n_chans, kernel_size, stride=1, padding=0), + torch.nn.ReLU(), + LayerNorm(n_chans, dim=1), + torch.nn.Dropout(dropout_rate) + )] + self.linear = torch.nn.Linear(n_chans, odim) + self.embed_positions = SinusoidalPositionalEmbedding(idim, 0, init_size=4096) + self.pos_embed_alpha = nn.Parameter(torch.Tensor([1])) + + def forward(self, xs): + """ + + :param xs: [B, T, H] + :return: [B, T, H] + """ + positions = self.pos_embed_alpha * self.embed_positions(xs[..., 0]) + xs = xs + positions + xs = xs.transpose(1, -1) # (B, idim, Tmax) + for f in self.conv: + xs = f(xs) # (B, C, Tmax) + # NOTE: calculate in log domain + xs = self.linear(xs.transpose(1, -1)) # (B, Tmax, H) + return xs + + +class EnergyPredictor(PitchPredictor): + pass + + +def mel2ph_to_dur(mel2ph, T_txt, max_dur=None): + B, _ = mel2ph.shape + dur = mel2ph.new_zeros(B, T_txt + 1).scatter_add(1, mel2ph, torch.ones_like(mel2ph)) + dur = dur[:, 1:] + if max_dur is not None: + dur = dur.clamp(max=max_dur) + return dur + + +class FFTBlocks(nn.Module): + def __init__(self, hidden_size, num_layers, ffn_kernel_size=9, dropout=None, num_heads=2, + use_pos_embed=True, use_last_norm=True, norm='ln', use_pos_embed_alpha=True): + super().__init__() + self.num_layers = num_layers + embed_dim = self.hidden_size = hidden_size + self.dropout = dropout if dropout is not None else hparams['dropout'] + self.use_pos_embed = use_pos_embed + self.use_last_norm = use_last_norm + if use_pos_embed: + self.max_source_positions = DEFAULT_MAX_TARGET_POSITIONS + self.padding_idx = 0 + self.pos_embed_alpha = nn.Parameter(torch.Tensor([1])) if use_pos_embed_alpha else 1 + self.embed_positions = SinusoidalPositionalEmbedding( + embed_dim, self.padding_idx, init_size=DEFAULT_MAX_TARGET_POSITIONS, + ) + + self.layers = nn.ModuleList([]) + self.layers.extend([ + TransformerEncoderLayer(self.hidden_size, self.dropout, + kernel_size=ffn_kernel_size, num_heads=num_heads) + for _ in range(self.num_layers) + ]) + if self.use_last_norm: + if norm == 'ln': + self.layer_norm = nn.LayerNorm(embed_dim) + elif norm == 'bn': + self.layer_norm = BatchNorm1dTBC(embed_dim) + else: + self.layer_norm = None + + def forward(self, x, padding_mask=None, attn_mask=None, return_hiddens=False): + """ + :param x: [B, T, C] + :param padding_mask: [B, T] + :return: [B, T, C] or [L, B, T, C] + """ + padding_mask = x.abs().sum(-1).eq(0).data if padding_mask is None else padding_mask + nonpadding_mask_TB = 1 - padding_mask.transpose(0, 1).float()[:, :, None] # [T, B, 1] + if self.use_pos_embed: + positions = self.pos_embed_alpha * self.embed_positions(x[..., 0]) + x = x + positions + x = F.dropout(x, p=self.dropout, training=self.training) + # B x T x C -> T x B x C + x = x.transpose(0, 1) * nonpadding_mask_TB + hiddens = [] + for layer in self.layers: + x = layer(x, encoder_padding_mask=padding_mask, attn_mask=attn_mask) * nonpadding_mask_TB + hiddens.append(x) + if self.use_last_norm: + x = self.layer_norm(x) * nonpadding_mask_TB + if return_hiddens: + x = torch.stack(hiddens, 0) # [L, T, B, C] + x = x.transpose(1, 2) # [L, B, T, C] + else: + x = x.transpose(0, 1) # [B, T, C] + return x + + 
+class FastspeechEncoder(FFTBlocks): + def __init__(self, embed_tokens, hidden_size=None, num_layers=None, kernel_size=None, num_heads=2): + hidden_size = hparams['hidden_size'] if hidden_size is None else hidden_size + kernel_size = hparams['enc_ffn_kernel_size'] if kernel_size is None else kernel_size + num_layers = hparams['dec_layers'] if num_layers is None else num_layers + super().__init__(hidden_size, num_layers, kernel_size, num_heads=num_heads, + use_pos_embed=False) # use_pos_embed_alpha for compatibility + self.embed_tokens = embed_tokens + self.embed_scale = math.sqrt(hidden_size) + self.padding_idx = 0 + if hparams.get('rel_pos') is not None and hparams['rel_pos']: + self.embed_positions = RelPositionalEncoding(hidden_size, dropout_rate=0.0) + else: + self.embed_positions = SinusoidalPositionalEmbedding( + hidden_size, self.padding_idx, init_size=DEFAULT_MAX_TARGET_POSITIONS, + ) + + def forward(self, txt_tokens): + """ + + :param txt_tokens: [B, T] + :return: { + 'encoder_out': [T x B x C] + } + """ + encoder_padding_mask = txt_tokens.eq(self.padding_idx).data + x = self.forward_embedding(txt_tokens) # [B, T, H] + x = super(FastspeechEncoder, self).forward(x, encoder_padding_mask) + return x + + def forward_embedding(self, txt_tokens): + # embed tokens and positions + x = self.embed_scale * self.embed_tokens(txt_tokens) + if hparams['use_pos_embed']: + if hparams.get('rel_pos') is not None and hparams['rel_pos']: + x = self.embed_positions(x) + else: + positions = self.embed_positions(txt_tokens) + x = x + positions + x = F.dropout(x, p=self.dropout, training=self.training) + return x + + +class FastspeechDecoder(FFTBlocks): + def __init__(self, hidden_size=None, num_layers=None, kernel_size=None, num_heads=None): + num_heads = hparams['num_heads'] if num_heads is None else num_heads + hidden_size = hparams['hidden_size'] if hidden_size is None else hidden_size + kernel_size = hparams['dec_ffn_kernel_size'] if kernel_size is None else kernel_size + num_layers = hparams['dec_layers'] if num_layers is None else num_layers + super().__init__(hidden_size, num_layers, kernel_size, num_heads=num_heads) + diff --git a/NeuralSeq/modules/hifigan/__pycache__/hifigan.cpython-37.pyc b/NeuralSeq/modules/hifigan/__pycache__/hifigan.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b1b15b430cf380d93503b943f76693ae843d1ae1 Binary files /dev/null and b/NeuralSeq/modules/hifigan/__pycache__/hifigan.cpython-37.pyc differ diff --git a/NeuralSeq/modules/hifigan/__pycache__/hifigan.cpython-38.pyc b/NeuralSeq/modules/hifigan/__pycache__/hifigan.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bf3a3429d75cef713cba85628edffe5d2d74e66b Binary files /dev/null and b/NeuralSeq/modules/hifigan/__pycache__/hifigan.cpython-38.pyc differ diff --git a/NeuralSeq/modules/hifigan/hifigan.py b/NeuralSeq/modules/hifigan/hifigan.py new file mode 100644 index 0000000000000000000000000000000000000000..ae7e61f56b00d60bcc49a18ece3edbe54746f7ea --- /dev/null +++ b/NeuralSeq/modules/hifigan/hifigan.py @@ -0,0 +1,365 @@ +import torch +import torch.nn.functional as F +import torch.nn as nn +from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d +from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm + +from modules.parallel_wavegan.layers import UpsampleNetwork, ConvInUpsampleNetwork +from modules.parallel_wavegan.models.source import SourceModuleHnNSF +import numpy as np + +LRELU_SLOPE = 0.1 + + +def init_weights(m, mean=0.0, 
std=0.01): + classname = m.__class__.__name__ + if classname.find("Conv") != -1: + m.weight.data.normal_(mean, std) + + +def apply_weight_norm(m): + classname = m.__class__.__name__ + if classname.find("Conv") != -1: + weight_norm(m) + + +def get_padding(kernel_size, dilation=1): + return int((kernel_size * dilation - dilation) / 2) + + +class ResBlock1(torch.nn.Module): + def __init__(self, h, channels, kernel_size=3, dilation=(1, 3, 5)): + super(ResBlock1, self).__init__() + self.h = h + self.convs1 = nn.ModuleList([ + weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], + padding=get_padding(kernel_size, dilation[0]))), + weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], + padding=get_padding(kernel_size, dilation[1]))), + weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2], + padding=get_padding(kernel_size, dilation[2]))) + ]) + self.convs1.apply(init_weights) + + self.convs2 = nn.ModuleList([ + weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, + padding=get_padding(kernel_size, 1))), + weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, + padding=get_padding(kernel_size, 1))), + weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, + padding=get_padding(kernel_size, 1))) + ]) + self.convs2.apply(init_weights) + + def forward(self, x): + for c1, c2 in zip(self.convs1, self.convs2): + xt = F.leaky_relu(x, LRELU_SLOPE) + xt = c1(xt) + xt = F.leaky_relu(xt, LRELU_SLOPE) + xt = c2(xt) + x = xt + x + return x + + def remove_weight_norm(self): + for l in self.convs1: + remove_weight_norm(l) + for l in self.convs2: + remove_weight_norm(l) + + +class ResBlock2(torch.nn.Module): + def __init__(self, h, channels, kernel_size=3, dilation=(1, 3)): + super(ResBlock2, self).__init__() + self.h = h + self.convs = nn.ModuleList([ + weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], + padding=get_padding(kernel_size, dilation[0]))), + weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], + padding=get_padding(kernel_size, dilation[1]))) + ]) + self.convs.apply(init_weights) + + def forward(self, x): + for c in self.convs: + xt = F.leaky_relu(x, LRELU_SLOPE) + xt = c(xt) + x = xt + x + return x + + def remove_weight_norm(self): + for l in self.convs: + remove_weight_norm(l) + + +class Conv1d1x1(Conv1d): + """1x1 Conv1d with customized initialization.""" + + def __init__(self, in_channels, out_channels, bias): + """Initialize 1x1 Conv1d module.""" + super(Conv1d1x1, self).__init__(in_channels, out_channels, + kernel_size=1, padding=0, + dilation=1, bias=bias) + + +class HifiGanGenerator(torch.nn.Module): + def __init__(self, h, c_out=1): + super(HifiGanGenerator, self).__init__() + self.h = h + self.num_kernels = len(h['resblock_kernel_sizes']) + self.num_upsamples = len(h['upsample_rates']) + + if h['use_pitch_embed']: + self.harmonic_num = 8 + self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(h['upsample_rates'])) + self.m_source = SourceModuleHnNSF( + sampling_rate=h['audio_sample_rate'], + harmonic_num=self.harmonic_num) + self.noise_convs = nn.ModuleList() + self.conv_pre = weight_norm(Conv1d(80, h['upsample_initial_channel'], 7, 1, padding=3)) + resblock = ResBlock1 if h['resblock'] == '1' else ResBlock2 + + self.ups = nn.ModuleList() + for i, (u, k) in enumerate(zip(h['upsample_rates'], h['upsample_kernel_sizes'])): + c_cur = h['upsample_initial_channel'] // (2 ** (i + 1)) + self.ups.append(weight_norm( + 
ConvTranspose1d(c_cur * 2, c_cur, k, u, padding=(k - u) // 2))) + if h['use_pitch_embed']: + if i + 1 < len(h['upsample_rates']): + stride_f0 = np.prod(h['upsample_rates'][i + 1:]) + self.noise_convs.append(Conv1d( + 1, c_cur, kernel_size=stride_f0 * 2, stride=stride_f0, padding=stride_f0 // 2)) + else: + self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1)) + + self.resblocks = nn.ModuleList() + for i in range(len(self.ups)): + ch = h['upsample_initial_channel'] // (2 ** (i + 1)) + for j, (k, d) in enumerate(zip(h['resblock_kernel_sizes'], h['resblock_dilation_sizes'])): + self.resblocks.append(resblock(h, ch, k, d)) + + self.conv_post = weight_norm(Conv1d(ch, c_out, 7, 1, padding=3)) + self.ups.apply(init_weights) + self.conv_post.apply(init_weights) + + def forward(self, x, f0=None): + if f0 is not None: + # harmonic-source signal, noise-source signal, uv flag + f0 = self.f0_upsamp(f0[:, None]).transpose(1, 2) + har_source, noi_source, uv = self.m_source(f0) + har_source = har_source.transpose(1, 2) + + x = self.conv_pre(x) + for i in range(self.num_upsamples): + x = F.leaky_relu(x, LRELU_SLOPE) + x = self.ups[i](x) + if f0 is not None: + x_source = self.noise_convs[i](har_source) + x = x + x_source + xs = None + for j in range(self.num_kernels): + if xs is None: + xs = self.resblocks[i * self.num_kernels + j](x) + else: + xs += self.resblocks[i * self.num_kernels + j](x) + x = xs / self.num_kernels + x = F.leaky_relu(x) + x = self.conv_post(x) + x = torch.tanh(x) + + return x + + def remove_weight_norm(self): + print('Removing weight norm...') + for l in self.ups: + remove_weight_norm(l) + for l in self.resblocks: + l.remove_weight_norm() + remove_weight_norm(self.conv_pre) + remove_weight_norm(self.conv_post) + + +class DiscriminatorP(torch.nn.Module): + def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False, use_cond=False, c_in=1): + super(DiscriminatorP, self).__init__() + self.use_cond = use_cond + if use_cond: + from utils.hparams import hparams + t = hparams['hop_size'] + self.cond_net = torch.nn.ConvTranspose1d(80, 1, t * 2, stride=t, padding=t // 2) + c_in = 2 + + self.period = period + norm_f = weight_norm if use_spectral_norm == False else spectral_norm + self.convs = nn.ModuleList([ + norm_f(Conv2d(c_in, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))), + norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))), + norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))), + norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))), + norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(2, 0))), + ]) + self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) + + def forward(self, x, mel): + fmap = [] + if self.use_cond: + x_mel = self.cond_net(mel) + x = torch.cat([x_mel, x], 1) + # 1d to 2d + b, c, t = x.shape + if t % self.period != 0: # pad first + n_pad = self.period - (t % self.period) + x = F.pad(x, (0, n_pad), "reflect") + t = t + n_pad + x = x.view(b, c, t // self.period, self.period) + + for l in self.convs: + x = l(x) + x = F.leaky_relu(x, LRELU_SLOPE) + fmap.append(x) + x = self.conv_post(x) + fmap.append(x) + x = torch.flatten(x, 1, -1) + + return x, fmap + + +class MultiPeriodDiscriminator(torch.nn.Module): + def __init__(self, use_cond=False, c_in=1): + super(MultiPeriodDiscriminator, self).__init__() + self.discriminators = nn.ModuleList([ + DiscriminatorP(2, use_cond=use_cond, c_in=c_in), + DiscriminatorP(3, 
use_cond=use_cond, c_in=c_in), + DiscriminatorP(5, use_cond=use_cond, c_in=c_in), + DiscriminatorP(7, use_cond=use_cond, c_in=c_in), + DiscriminatorP(11, use_cond=use_cond, c_in=c_in), + ]) + + def forward(self, y, y_hat, mel=None): + y_d_rs = [] + y_d_gs = [] + fmap_rs = [] + fmap_gs = [] + for i, d in enumerate(self.discriminators): + y_d_r, fmap_r = d(y, mel) + y_d_g, fmap_g = d(y_hat, mel) + y_d_rs.append(y_d_r) + fmap_rs.append(fmap_r) + y_d_gs.append(y_d_g) + fmap_gs.append(fmap_g) + + return y_d_rs, y_d_gs, fmap_rs, fmap_gs + + +class DiscriminatorS(torch.nn.Module): + def __init__(self, use_spectral_norm=False, use_cond=False, upsample_rates=None, c_in=1): + super(DiscriminatorS, self).__init__() + self.use_cond = use_cond + if use_cond: + t = np.prod(upsample_rates) + self.cond_net = torch.nn.ConvTranspose1d(80, 1, t * 2, stride=t, padding=t // 2) + c_in = 2 + norm_f = weight_norm if use_spectral_norm == False else spectral_norm + self.convs = nn.ModuleList([ + norm_f(Conv1d(c_in, 128, 15, 1, padding=7)), + norm_f(Conv1d(128, 128, 41, 2, groups=4, padding=20)), + norm_f(Conv1d(128, 256, 41, 2, groups=16, padding=20)), + norm_f(Conv1d(256, 512, 41, 4, groups=16, padding=20)), + norm_f(Conv1d(512, 1024, 41, 4, groups=16, padding=20)), + norm_f(Conv1d(1024, 1024, 41, 1, groups=16, padding=20)), + norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), + ]) + self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) + + def forward(self, x, mel): + if self.use_cond: + x_mel = self.cond_net(mel) + x = torch.cat([x_mel, x], 1) + fmap = [] + for l in self.convs: + x = l(x) + x = F.leaky_relu(x, LRELU_SLOPE) + fmap.append(x) + x = self.conv_post(x) + fmap.append(x) + x = torch.flatten(x, 1, -1) + + return x, fmap + + +class MultiScaleDiscriminator(torch.nn.Module): + def __init__(self, use_cond=False, c_in=1): + super(MultiScaleDiscriminator, self).__init__() + from utils.hparams import hparams + self.discriminators = nn.ModuleList([ + DiscriminatorS(use_spectral_norm=True, use_cond=use_cond, + upsample_rates=[4, 4, hparams['hop_size'] // 16], + c_in=c_in), + DiscriminatorS(use_cond=use_cond, + upsample_rates=[4, 4, hparams['hop_size'] // 32], + c_in=c_in), + DiscriminatorS(use_cond=use_cond, + upsample_rates=[4, 4, hparams['hop_size'] // 64], + c_in=c_in), + ]) + self.meanpools = nn.ModuleList([ + AvgPool1d(4, 2, padding=1), + AvgPool1d(4, 2, padding=1) + ]) + + def forward(self, y, y_hat, mel=None): + y_d_rs = [] + y_d_gs = [] + fmap_rs = [] + fmap_gs = [] + for i, d in enumerate(self.discriminators): + if i != 0: + y = self.meanpools[i - 1](y) + y_hat = self.meanpools[i - 1](y_hat) + y_d_r, fmap_r = d(y, mel) + y_d_g, fmap_g = d(y_hat, mel) + y_d_rs.append(y_d_r) + fmap_rs.append(fmap_r) + y_d_gs.append(y_d_g) + fmap_gs.append(fmap_g) + + return y_d_rs, y_d_gs, fmap_rs, fmap_gs + + +def feature_loss(fmap_r, fmap_g): + loss = 0 + for dr, dg in zip(fmap_r, fmap_g): + for rl, gl in zip(dr, dg): + loss += torch.mean(torch.abs(rl - gl)) + + return loss * 2 + + +def discriminator_loss(disc_real_outputs, disc_generated_outputs): + r_losses = 0 + g_losses = 0 + for dr, dg in zip(disc_real_outputs, disc_generated_outputs): + r_loss = torch.mean((1 - dr) ** 2) + g_loss = torch.mean(dg ** 2) + r_losses += r_loss + g_losses += g_loss + r_losses = r_losses / len(disc_real_outputs) + g_losses = g_losses / len(disc_real_outputs) + return r_losses, g_losses + + +def cond_discriminator_loss(outputs): + loss = 0 + for dg in outputs: + g_loss = torch.mean(dg ** 2) + loss += g_loss + loss = loss / len(outputs) + 
return loss + + +def generator_loss(disc_outputs): + loss = 0 + for dg in disc_outputs: + l = torch.mean((1 - dg) ** 2) + loss += l + loss = loss / len(disc_outputs) + return loss diff --git a/NeuralSeq/modules/hifigan/mel_utils.py b/NeuralSeq/modules/hifigan/mel_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..06e0f7d4d16fa3e4aefc8949347455f5a6e938da --- /dev/null +++ b/NeuralSeq/modules/hifigan/mel_utils.py @@ -0,0 +1,80 @@ +import numpy as np +import torch +import torch.utils.data +from librosa.filters import mel as librosa_mel_fn +from scipy.io.wavfile import read + +MAX_WAV_VALUE = 32768.0 + + +def load_wav(full_path): + sampling_rate, data = read(full_path) + return data, sampling_rate + + +def dynamic_range_compression(x, C=1, clip_val=1e-5): + return np.log(np.clip(x, a_min=clip_val, a_max=None) * C) + + +def dynamic_range_decompression(x, C=1): + return np.exp(x) / C + + +def dynamic_range_compression_torch(x, C=1, clip_val=1e-5): + return torch.log(torch.clamp(x, min=clip_val) * C) + + +def dynamic_range_decompression_torch(x, C=1): + return torch.exp(x) / C + + +def spectral_normalize_torch(magnitudes): + output = dynamic_range_compression_torch(magnitudes) + return output + + +def spectral_de_normalize_torch(magnitudes): + output = dynamic_range_decompression_torch(magnitudes) + return output + + +mel_basis = {} +hann_window = {} + + +def mel_spectrogram(y, hparams, center=False, complex=False): + # hop_size: 512 # For 22050Hz, 275 ~= 12.5 ms (0.0125 * sample_rate) + # win_size: 2048 # For 22050Hz, 1100 ~= 50 ms (If None, win_size: fft_size) (0.05 * sample_rate) + # fmin: 55 # Set this to 55 if your speaker is male! if female, 95 should help taking off noise. (To test depending on dataset. Pitch info: male~[65, 260], female~[100, 525]) + # fmax: 10000 # To be increased/reduced depending on data. + # fft_size: 2048 # Extra window size is filled with 0 paddings to match this parameter + # n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, + n_fft = hparams['fft_size'] + num_mels = hparams['audio_num_mel_bins'] + sampling_rate = hparams['audio_sample_rate'] + hop_size = hparams['hop_size'] + win_size = hparams['win_size'] + fmin = hparams['fmin'] + fmax = hparams['fmax'] + y = y.clamp(min=-1., max=1.) 
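Everything `mel_spectrogram` needs is read from a flat `hparams` dict with the keys unpacked above, so the function can be exercised on its own. A minimal call might look like the sketch below; the numeric values are placeholders chosen for this example, not the project's shipped configuration.

```python
# Illustrative usage of mel_spectrogram() defined in this file; the dict keys are
# exactly the ones the function reads, the values are example placeholders.
import torch

example_hparams = {
    'fft_size': 1024,
    'audio_num_mel_bins': 80,
    'audio_sample_rate': 22050,
    'hop_size': 256,
    'win_size': 1024,
    'fmin': 80,
    'fmax': 7600,
}

wav = torch.randn(2, 22050).clamp(-1.0, 1.0)     # (B, T) waveform in [-1, 1]
mel = mel_spectrogram(wav, example_hparams)      # (B, audio_num_mel_bins, frames), log-compressed
print(mel.shape)
```

One detail of the code that follows is worth noting: the cache test checks `fmax` itself while the stored key is `str(fmax) + '_' + str(y.device)`, so as written the mel filterbank is rebuilt on every call. The output is still correct, the work is just redundant.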
+ global mel_basis, hann_window + if fmax not in mel_basis: + mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax) + mel_basis[str(fmax) + '_' + str(y.device)] = torch.from_numpy(mel).float().to(y.device) + hann_window[str(y.device)] = torch.hann_window(win_size).to(y.device) + + y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)), + mode='reflect') + y = y.squeeze(1) + + spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[str(y.device)], + center=center, pad_mode='reflect', normalized=False, onesided=True) + + if not complex: + spec = torch.sqrt(spec.pow(2).sum(-1) + (1e-9)) + spec = torch.matmul(mel_basis[str(fmax) + '_' + str(y.device)], spec) + spec = spectral_normalize_torch(spec) + else: + B, C, T, _ = spec.shape + spec = spec.transpose(1, 2) # [B, T, n_fft, 2] + return spec diff --git a/NeuralSeq/modules/parallel_wavegan/__init__.py b/NeuralSeq/modules/parallel_wavegan/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/NeuralSeq/modules/parallel_wavegan/__pycache__/__init__.cpython-37.pyc b/NeuralSeq/modules/parallel_wavegan/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ab6449f0e9546d6f8d5d451b2b509bd007e082b9 Binary files /dev/null and b/NeuralSeq/modules/parallel_wavegan/__pycache__/__init__.cpython-37.pyc differ diff --git a/NeuralSeq/modules/parallel_wavegan/__pycache__/__init__.cpython-38.pyc b/NeuralSeq/modules/parallel_wavegan/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..032fc50dd2c3c20aef32fb51ff3542fcf41bb26d Binary files /dev/null and b/NeuralSeq/modules/parallel_wavegan/__pycache__/__init__.cpython-38.pyc differ diff --git a/NeuralSeq/modules/parallel_wavegan/layers/__init__.py b/NeuralSeq/modules/parallel_wavegan/layers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e477f51116a3157781b1aefefbaf32fe4d4bd1f0 --- /dev/null +++ b/NeuralSeq/modules/parallel_wavegan/layers/__init__.py @@ -0,0 +1,5 @@ +from .causal_conv import * # NOQA +from .pqmf import * # NOQA +from .residual_block import * # NOQA +from modules.parallel_wavegan.layers.residual_stack import * # NOQA +from .upsample import * # NOQA diff --git a/NeuralSeq/modules/parallel_wavegan/layers/__pycache__/__init__.cpython-37.pyc b/NeuralSeq/modules/parallel_wavegan/layers/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..19491565c4362ecd9ee4a47318f337fe5507666a Binary files /dev/null and b/NeuralSeq/modules/parallel_wavegan/layers/__pycache__/__init__.cpython-37.pyc differ diff --git a/NeuralSeq/modules/parallel_wavegan/layers/__pycache__/__init__.cpython-38.pyc b/NeuralSeq/modules/parallel_wavegan/layers/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bf41e5f0f8fa03f54a48654452eeb6d00413711c Binary files /dev/null and b/NeuralSeq/modules/parallel_wavegan/layers/__pycache__/__init__.cpython-38.pyc differ diff --git a/NeuralSeq/modules/parallel_wavegan/layers/__pycache__/causal_conv.cpython-37.pyc b/NeuralSeq/modules/parallel_wavegan/layers/__pycache__/causal_conv.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..75b6c49671cab8e8d229533a4132c5165c859bf7 Binary files /dev/null and 
b/NeuralSeq/modules/parallel_wavegan/layers/__pycache__/causal_conv.cpython-37.pyc differ diff --git a/NeuralSeq/modules/parallel_wavegan/layers/__pycache__/causal_conv.cpython-38.pyc b/NeuralSeq/modules/parallel_wavegan/layers/__pycache__/causal_conv.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b638fc2081e706743184242949301e9eab787a59 Binary files /dev/null and b/NeuralSeq/modules/parallel_wavegan/layers/__pycache__/causal_conv.cpython-38.pyc differ diff --git a/NeuralSeq/modules/parallel_wavegan/layers/__pycache__/pqmf.cpython-37.pyc b/NeuralSeq/modules/parallel_wavegan/layers/__pycache__/pqmf.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..325a96798c2a0a84d24d01c8491b6e255c7edd08 Binary files /dev/null and b/NeuralSeq/modules/parallel_wavegan/layers/__pycache__/pqmf.cpython-37.pyc differ diff --git a/NeuralSeq/modules/parallel_wavegan/layers/__pycache__/pqmf.cpython-38.pyc b/NeuralSeq/modules/parallel_wavegan/layers/__pycache__/pqmf.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..10765b4afbffcece6dee116f62282573d7a7ae49 Binary files /dev/null and b/NeuralSeq/modules/parallel_wavegan/layers/__pycache__/pqmf.cpython-38.pyc differ diff --git a/NeuralSeq/modules/parallel_wavegan/layers/__pycache__/residual_block.cpython-37.pyc b/NeuralSeq/modules/parallel_wavegan/layers/__pycache__/residual_block.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..208d682bfd96dc5f6129d0466828e409f25ca36f Binary files /dev/null and b/NeuralSeq/modules/parallel_wavegan/layers/__pycache__/residual_block.cpython-37.pyc differ diff --git a/NeuralSeq/modules/parallel_wavegan/layers/__pycache__/residual_block.cpython-38.pyc b/NeuralSeq/modules/parallel_wavegan/layers/__pycache__/residual_block.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c61b091f4eee43faac5d00b327168a3f1d15f1c8 Binary files /dev/null and b/NeuralSeq/modules/parallel_wavegan/layers/__pycache__/residual_block.cpython-38.pyc differ diff --git a/NeuralSeq/modules/parallel_wavegan/layers/__pycache__/residual_stack.cpython-37.pyc b/NeuralSeq/modules/parallel_wavegan/layers/__pycache__/residual_stack.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6062d5b911ad79f83af3207c3827b33628d8c269 Binary files /dev/null and b/NeuralSeq/modules/parallel_wavegan/layers/__pycache__/residual_stack.cpython-37.pyc differ diff --git a/NeuralSeq/modules/parallel_wavegan/layers/__pycache__/residual_stack.cpython-38.pyc b/NeuralSeq/modules/parallel_wavegan/layers/__pycache__/residual_stack.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8705bfc4a767536c006d8ac73661a8ec7c60eedc Binary files /dev/null and b/NeuralSeq/modules/parallel_wavegan/layers/__pycache__/residual_stack.cpython-38.pyc differ diff --git a/NeuralSeq/modules/parallel_wavegan/layers/__pycache__/upsample.cpython-37.pyc b/NeuralSeq/modules/parallel_wavegan/layers/__pycache__/upsample.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0fe9ed9c257fb0a5d734db5e2a4b3e4a2b674f64 Binary files /dev/null and b/NeuralSeq/modules/parallel_wavegan/layers/__pycache__/upsample.cpython-37.pyc differ diff --git a/NeuralSeq/modules/parallel_wavegan/layers/__pycache__/upsample.cpython-38.pyc b/NeuralSeq/modules/parallel_wavegan/layers/__pycache__/upsample.cpython-38.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..d073c8713b38f4039f353c9ac8e73c0495adeefe Binary files /dev/null and b/NeuralSeq/modules/parallel_wavegan/layers/__pycache__/upsample.cpython-38.pyc differ diff --git a/NeuralSeq/modules/parallel_wavegan/layers/causal_conv.py b/NeuralSeq/modules/parallel_wavegan/layers/causal_conv.py new file mode 100644 index 0000000000000000000000000000000000000000..fca77daf65f234e6fbe355ed148fc8f0ee85038a --- /dev/null +++ b/NeuralSeq/modules/parallel_wavegan/layers/causal_conv.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Tomoki Hayashi +# MIT License (https://opensource.org/licenses/MIT) + +"""Causal convolusion layer modules.""" + + +import torch + + +class CausalConv1d(torch.nn.Module): + """CausalConv1d module with customized initialization.""" + + def __init__(self, in_channels, out_channels, kernel_size, + dilation=1, bias=True, pad="ConstantPad1d", pad_params={"value": 0.0}): + """Initialize CausalConv1d module.""" + super(CausalConv1d, self).__init__() + self.pad = getattr(torch.nn, pad)((kernel_size - 1) * dilation, **pad_params) + self.conv = torch.nn.Conv1d(in_channels, out_channels, kernel_size, + dilation=dilation, bias=bias) + + def forward(self, x): + """Calculate forward propagation. + + Args: + x (Tensor): Input tensor (B, in_channels, T). + + Returns: + Tensor: Output tensor (B, out_channels, T). + + """ + return self.conv(self.pad(x))[:, :, :x.size(2)] + + +class CausalConvTranspose1d(torch.nn.Module): + """CausalConvTranspose1d module with customized initialization.""" + + def __init__(self, in_channels, out_channels, kernel_size, stride, bias=True): + """Initialize CausalConvTranspose1d module.""" + super(CausalConvTranspose1d, self).__init__() + self.deconv = torch.nn.ConvTranspose1d( + in_channels, out_channels, kernel_size, stride, bias=bias) + self.stride = stride + + def forward(self, x): + """Calculate forward propagation. + + Args: + x (Tensor): Input tensor (B, in_channels, T_in). + + Returns: + Tensor: Output tensor (B, out_channels, T_out). + + """ + return self.deconv(x)[:, :, :-self.stride] diff --git a/NeuralSeq/modules/parallel_wavegan/layers/pqmf.py b/NeuralSeq/modules/parallel_wavegan/layers/pqmf.py new file mode 100644 index 0000000000000000000000000000000000000000..ac21074fd32a370a099fa2facb62cfd3253d7579 --- /dev/null +++ b/NeuralSeq/modules/parallel_wavegan/layers/pqmf.py @@ -0,0 +1,129 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Tomoki Hayashi +# MIT License (https://opensource.org/licenses/MIT) + +"""Pseudo QMF modules.""" + +import numpy as np +import torch +import torch.nn.functional as F + +from scipy.signal import kaiser + + +def design_prototype_filter(taps=62, cutoff_ratio=0.15, beta=9.0): + """Design prototype filter for PQMF. + + This method is based on `A Kaiser window approach for the design of prototype + filters of cosine modulated filterbanks`_. + + Args: + taps (int): The number of filter taps. + cutoff_ratio (float): Cut-off frequency ratio. + beta (float): Beta coefficient for kaiser window. + + Returns: + ndarray: Impluse response of prototype filter (taps + 1,). + + .. _`A Kaiser window approach for the design of prototype filters of cosine modulated filterbanks`: + https://ieeexplore.ieee.org/abstract/document/681427 + + """ + # check the arguments are valid + assert taps % 2 == 0, "The number of taps mush be even number." + assert 0.0 < cutoff_ratio < 1.0, "Cutoff ratio must be > 0.0 and < 1.0." 
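As an aside on the causal layers defined just above: `CausalConv1d` pads by `(kernel_size - 1) * dilation` and slices the convolution output back to `x.size(2)`, so every output step depends only on current and past samples. A small self-contained check, not part of the library, just a sanity sketch:

```python
# Quick causality check for the CausalConv1d defined above: perturbing the last
# input sample must leave every earlier output frame unchanged.
import torch

torch.manual_seed(0)
conv = CausalConv1d(in_channels=1, out_channels=1, kernel_size=3, dilation=2)
x = torch.randn(1, 1, 16)
y_ref = conv(x)

x_perturbed = x.clone()
x_perturbed[..., -1] += 1.0                       # change only the final time step
y_new = conv(x_perturbed)

assert y_ref.shape == x.shape                     # output keeps the input length
assert torch.allclose(y_ref[..., :-1], y_new[..., :-1])
```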
+ + # make initial filter + omega_c = np.pi * cutoff_ratio + with np.errstate(invalid='ignore'): + h_i = np.sin(omega_c * (np.arange(taps + 1) - 0.5 * taps)) \ + / (np.pi * (np.arange(taps + 1) - 0.5 * taps)) + h_i[taps // 2] = np.cos(0) * cutoff_ratio # fix nan due to indeterminate form + + # apply kaiser window + w = kaiser(taps + 1, beta) + h = h_i * w + + return h + + +class PQMF(torch.nn.Module): + """PQMF module. + + This module is based on `Near-perfect-reconstruction pseudo-QMF banks`_. + + .. _`Near-perfect-reconstruction pseudo-QMF banks`: + https://ieeexplore.ieee.org/document/258122 + + """ + + def __init__(self, subbands=4, taps=62, cutoff_ratio=0.15, beta=9.0): + """Initilize PQMF module. + + Args: + subbands (int): The number of subbands. + taps (int): The number of filter taps. + cutoff_ratio (float): Cut-off frequency ratio. + beta (float): Beta coefficient for kaiser window. + + """ + super(PQMF, self).__init__() + + # define filter coefficient + h_proto = design_prototype_filter(taps, cutoff_ratio, beta) + h_analysis = np.zeros((subbands, len(h_proto))) + h_synthesis = np.zeros((subbands, len(h_proto))) + for k in range(subbands): + h_analysis[k] = 2 * h_proto * np.cos( + (2 * k + 1) * (np.pi / (2 * subbands)) * + (np.arange(taps + 1) - ((taps - 1) / 2)) + + (-1) ** k * np.pi / 4) + h_synthesis[k] = 2 * h_proto * np.cos( + (2 * k + 1) * (np.pi / (2 * subbands)) * + (np.arange(taps + 1) - ((taps - 1) / 2)) - + (-1) ** k * np.pi / 4) + + # convert to tensor + analysis_filter = torch.from_numpy(h_analysis).float().unsqueeze(1) + synthesis_filter = torch.from_numpy(h_synthesis).float().unsqueeze(0) + + # register coefficients as beffer + self.register_buffer("analysis_filter", analysis_filter) + self.register_buffer("synthesis_filter", synthesis_filter) + + # filter for downsampling & upsampling + updown_filter = torch.zeros((subbands, subbands, subbands)).float() + for k in range(subbands): + updown_filter[k, k, 0] = 1.0 + self.register_buffer("updown_filter", updown_filter) + self.subbands = subbands + + # keep padding info + self.pad_fn = torch.nn.ConstantPad1d(taps // 2, 0.0) + + def analysis(self, x): + """Analysis with PQMF. + + Args: + x (Tensor): Input tensor (B, 1, T). + + Returns: + Tensor: Output tensor (B, subbands, T // subbands). + + """ + x = F.conv1d(self.pad_fn(x), self.analysis_filter) + return F.conv1d(x, self.updown_filter, stride=self.subbands) + + def synthesis(self, x): + """Synthesis with PQMF. + + Args: + x (Tensor): Input tensor (B, subbands, T // subbands). + + Returns: + Tensor: Output tensor (B, 1, T). + + """ + x = F.conv_transpose1d(x, self.updown_filter * self.subbands, stride=self.subbands) + return F.conv1d(self.pad_fn(x), self.synthesis_filter) diff --git a/NeuralSeq/modules/parallel_wavegan/layers/residual_block.py b/NeuralSeq/modules/parallel_wavegan/layers/residual_block.py new file mode 100644 index 0000000000000000000000000000000000000000..7a267a86c1fa521c2824addf9dda304c43f1ff1f --- /dev/null +++ b/NeuralSeq/modules/parallel_wavegan/layers/residual_block.py @@ -0,0 +1,129 @@ +# -*- coding: utf-8 -*- + +"""Residual block module in WaveNet. + +This code is modified from https://github.com/r9y9/wavenet_vocoder. 
+ +""" + +import math + +import torch +import torch.nn.functional as F + + +class Conv1d(torch.nn.Conv1d): + """Conv1d module with customized initialization.""" + + def __init__(self, *args, **kwargs): + """Initialize Conv1d module.""" + super(Conv1d, self).__init__(*args, **kwargs) + + def reset_parameters(self): + """Reset parameters.""" + torch.nn.init.kaiming_normal_(self.weight, nonlinearity="relu") + if self.bias is not None: + torch.nn.init.constant_(self.bias, 0.0) + + +class Conv1d1x1(Conv1d): + """1x1 Conv1d with customized initialization.""" + + def __init__(self, in_channels, out_channels, bias): + """Initialize 1x1 Conv1d module.""" + super(Conv1d1x1, self).__init__(in_channels, out_channels, + kernel_size=1, padding=0, + dilation=1, bias=bias) + + +class ResidualBlock(torch.nn.Module): + """Residual block module in WaveNet.""" + + def __init__(self, + kernel_size=3, + residual_channels=64, + gate_channels=128, + skip_channels=64, + aux_channels=80, + dropout=0.0, + dilation=1, + bias=True, + use_causal_conv=False + ): + """Initialize ResidualBlock module. + + Args: + kernel_size (int): Kernel size of dilation convolution layer. + residual_channels (int): Number of channels for residual connection. + skip_channels (int): Number of channels for skip connection. + aux_channels (int): Local conditioning channels i.e. auxiliary input dimension. + dropout (float): Dropout probability. + dilation (int): Dilation factor. + bias (bool): Whether to add bias parameter in convolution layers. + use_causal_conv (bool): Whether to use use_causal_conv or non-use_causal_conv convolution. + + """ + super(ResidualBlock, self).__init__() + self.dropout = dropout + # no future time stamps available + if use_causal_conv: + padding = (kernel_size - 1) * dilation + else: + assert (kernel_size - 1) % 2 == 0, "Not support even number kernel size." + padding = (kernel_size - 1) // 2 * dilation + self.use_causal_conv = use_causal_conv + + # dilation conv + self.conv = Conv1d(residual_channels, gate_channels, kernel_size, + padding=padding, dilation=dilation, bias=bias) + + # local conditioning + if aux_channels > 0: + self.conv1x1_aux = Conv1d1x1(aux_channels, gate_channels, bias=False) + else: + self.conv1x1_aux = None + + # conv output is split into two groups + gate_out_channels = gate_channels // 2 + self.conv1x1_out = Conv1d1x1(gate_out_channels, residual_channels, bias=bias) + self.conv1x1_skip = Conv1d1x1(gate_out_channels, skip_channels, bias=bias) + + def forward(self, x, c): + """Calculate forward propagation. + + Args: + x (Tensor): Input tensor (B, residual_channels, T). + c (Tensor): Local conditioning auxiliary tensor (B, aux_channels, T). + + Returns: + Tensor: Output tensor for residual connection (B, residual_channels, T). + Tensor: Output tensor for skip connection (B, skip_channels, T). 
+ + """ + residual = x + x = F.dropout(x, p=self.dropout, training=self.training) + x = self.conv(x) + + # remove future time steps if use_causal_conv conv + x = x[:, :, :residual.size(-1)] if self.use_causal_conv else x + + # split into two part for gated activation + splitdim = 1 + xa, xb = x.split(x.size(splitdim) // 2, dim=splitdim) + + # local conditioning + if c is not None: + assert self.conv1x1_aux is not None + c = self.conv1x1_aux(c) + ca, cb = c.split(c.size(splitdim) // 2, dim=splitdim) + xa, xb = xa + ca, xb + cb + + x = torch.tanh(xa) * torch.sigmoid(xb) + + # for skip connection + s = self.conv1x1_skip(x) + + # for residual connection + x = (self.conv1x1_out(x) + residual) * math.sqrt(0.5) + + return x, s diff --git a/NeuralSeq/modules/parallel_wavegan/layers/residual_stack.py b/NeuralSeq/modules/parallel_wavegan/layers/residual_stack.py new file mode 100644 index 0000000000000000000000000000000000000000..6e07c8803ad348dd923f6b7c0f7aff14aab9cf78 --- /dev/null +++ b/NeuralSeq/modules/parallel_wavegan/layers/residual_stack.py @@ -0,0 +1,75 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Tomoki Hayashi +# MIT License (https://opensource.org/licenses/MIT) + +"""Residual stack module in MelGAN.""" + +import torch + +from . import CausalConv1d + + +class ResidualStack(torch.nn.Module): + """Residual stack module introduced in MelGAN.""" + + def __init__(self, + kernel_size=3, + channels=32, + dilation=1, + bias=True, + nonlinear_activation="LeakyReLU", + nonlinear_activation_params={"negative_slope": 0.2}, + pad="ReflectionPad1d", + pad_params={}, + use_causal_conv=False, + ): + """Initialize ResidualStack module. + + Args: + kernel_size (int): Kernel size of dilation convolution layer. + channels (int): Number of channels of convolution layers. + dilation (int): Dilation factor. + bias (bool): Whether to add bias parameter in convolution layers. + nonlinear_activation (str): Activation function module name. + nonlinear_activation_params (dict): Hyperparameters for activation function. + pad (str): Padding function module name before dilated convolution layer. + pad_params (dict): Hyperparameters for padding function. + use_causal_conv (bool): Whether to use causal convolution. + + """ + super(ResidualStack, self).__init__() + + # defile residual stack part + if not use_causal_conv: + assert (kernel_size - 1) % 2 == 0, "Not support even number kernel size." + self.stack = torch.nn.Sequential( + getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params), + getattr(torch.nn, pad)((kernel_size - 1) // 2 * dilation, **pad_params), + torch.nn.Conv1d(channels, channels, kernel_size, dilation=dilation, bias=bias), + getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params), + torch.nn.Conv1d(channels, channels, 1, bias=bias), + ) + else: + self.stack = torch.nn.Sequential( + getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params), + CausalConv1d(channels, channels, kernel_size, dilation=dilation, + bias=bias, pad=pad, pad_params=pad_params), + getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params), + torch.nn.Conv1d(channels, channels, 1, bias=bias), + ) + + # defile extra layer for skip connection + self.skip_layer = torch.nn.Conv1d(channels, channels, 1, bias=bias) + + def forward(self, c): + """Calculate forward propagation. + + Args: + c (Tensor): Input tensor (B, channels, T). + + Returns: + Tensor: Output tensor (B, chennels, T). 
+ + """ + return self.stack(c) + self.skip_layer(c) diff --git a/NeuralSeq/modules/parallel_wavegan/layers/tf_layers.py b/NeuralSeq/modules/parallel_wavegan/layers/tf_layers.py new file mode 100644 index 0000000000000000000000000000000000000000..c0f46bd755c161cda2ac904fe37f3f3c6357a88d --- /dev/null +++ b/NeuralSeq/modules/parallel_wavegan/layers/tf_layers.py @@ -0,0 +1,129 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 MINH ANH (@dathudeptrai) +# MIT License (https://opensource.org/licenses/MIT) + +"""Tensorflow Layer modules complatible with pytorch.""" + +import tensorflow as tf + + +class TFReflectionPad1d(tf.keras.layers.Layer): + """Tensorflow ReflectionPad1d module.""" + + def __init__(self, padding_size): + """Initialize TFReflectionPad1d module. + + Args: + padding_size (int): Padding size. + + """ + super(TFReflectionPad1d, self).__init__() + self.padding_size = padding_size + + @tf.function + def call(self, x): + """Calculate forward propagation. + + Args: + x (Tensor): Input tensor (B, T, 1, C). + + Returns: + Tensor: Padded tensor (B, T + 2 * padding_size, 1, C). + + """ + return tf.pad(x, [[0, 0], [self.padding_size, self.padding_size], [0, 0], [0, 0]], "REFLECT") + + +class TFConvTranspose1d(tf.keras.layers.Layer): + """Tensorflow ConvTranspose1d module.""" + + def __init__(self, channels, kernel_size, stride, padding): + """Initialize TFConvTranspose1d( module. + + Args: + channels (int): Number of channels. + kernel_size (int): kernel size. + strides (int): Stride width. + padding (str): Padding type ("same" or "valid"). + + """ + super(TFConvTranspose1d, self).__init__() + self.conv1d_transpose = tf.keras.layers.Conv2DTranspose( + filters=channels, + kernel_size=(kernel_size, 1), + strides=(stride, 1), + padding=padding, + ) + + @tf.function + def call(self, x): + """Calculate forward propagation. + + Args: + x (Tensor): Input tensor (B, T, 1, C). + + Returns: + Tensors: Output tensor (B, T', 1, C'). + + """ + x = self.conv1d_transpose(x) + return x + + +class TFResidualStack(tf.keras.layers.Layer): + """Tensorflow ResidualStack module.""" + + def __init__(self, + kernel_size, + channels, + dilation, + bias, + nonlinear_activation, + nonlinear_activation_params, + padding, + ): + """Initialize TFResidualStack module. + + Args: + kernel_size (int): Kernel size. + channles (int): Number of channels. + dilation (int): Dilation ine. + bias (bool): Whether to add bias parameter in convolution layers. + nonlinear_activation (str): Activation function module name. + nonlinear_activation_params (dict): Hyperparameters for activation function. + padding (str): Padding type ("same" or "valid"). + + """ + super(TFResidualStack, self).__init__() + self.block = [ + getattr(tf.keras.layers, nonlinear_activation)(**nonlinear_activation_params), + TFReflectionPad1d(dilation), + tf.keras.layers.Conv2D( + filters=channels, + kernel_size=(kernel_size, 1), + dilation_rate=(dilation, 1), + use_bias=bias, + padding="valid", + ), + getattr(tf.keras.layers, nonlinear_activation)(**nonlinear_activation_params), + tf.keras.layers.Conv2D(filters=channels, kernel_size=1, use_bias=bias) + ] + self.shortcut = tf.keras.layers.Conv2D(filters=channels, kernel_size=1, use_bias=bias) + + @tf.function + def call(self, x): + """Calculate forward propagation. + + Args: + x (Tensor): Input tensor (B, T, 1, C). + + Returns: + Tensor: Output tensor (B, T, 1, C). 
+ + """ + _x = tf.identity(x) + for i, layer in enumerate(self.block): + _x = layer(_x) + shortcut = self.shortcut(x) + return shortcut + _x diff --git a/NeuralSeq/modules/parallel_wavegan/layers/upsample.py b/NeuralSeq/modules/parallel_wavegan/layers/upsample.py new file mode 100644 index 0000000000000000000000000000000000000000..18c6397c420a81fadc5320e3a48f3249534decd8 --- /dev/null +++ b/NeuralSeq/modules/parallel_wavegan/layers/upsample.py @@ -0,0 +1,183 @@ +# -*- coding: utf-8 -*- + +"""Upsampling module. + +This code is modified from https://github.com/r9y9/wavenet_vocoder. + +""" + +import numpy as np +import torch +import torch.nn.functional as F + +from . import Conv1d + + +class Stretch2d(torch.nn.Module): + """Stretch2d module.""" + + def __init__(self, x_scale, y_scale, mode="nearest"): + """Initialize Stretch2d module. + + Args: + x_scale (int): X scaling factor (Time axis in spectrogram). + y_scale (int): Y scaling factor (Frequency axis in spectrogram). + mode (str): Interpolation mode. + + """ + super(Stretch2d, self).__init__() + self.x_scale = x_scale + self.y_scale = y_scale + self.mode = mode + + def forward(self, x): + """Calculate forward propagation. + + Args: + x (Tensor): Input tensor (B, C, F, T). + + Returns: + Tensor: Interpolated tensor (B, C, F * y_scale, T * x_scale), + + """ + return F.interpolate( + x, scale_factor=(self.y_scale, self.x_scale), mode=self.mode) + + +class Conv2d(torch.nn.Conv2d): + """Conv2d module with customized initialization.""" + + def __init__(self, *args, **kwargs): + """Initialize Conv2d module.""" + super(Conv2d, self).__init__(*args, **kwargs) + + def reset_parameters(self): + """Reset parameters.""" + self.weight.data.fill_(1. / np.prod(self.kernel_size)) + if self.bias is not None: + torch.nn.init.constant_(self.bias, 0.0) + + +class UpsampleNetwork(torch.nn.Module): + """Upsampling network module.""" + + def __init__(self, + upsample_scales, + nonlinear_activation=None, + nonlinear_activation_params={}, + interpolate_mode="nearest", + freq_axis_kernel_size=1, + use_causal_conv=False, + ): + """Initialize upsampling network module. + + Args: + upsample_scales (list): List of upsampling scales. + nonlinear_activation (str): Activation function name. + nonlinear_activation_params (dict): Arguments for specified activation function. + interpolate_mode (str): Interpolation mode. + freq_axis_kernel_size (int): Kernel size in the direction of frequency axis. + + """ + super(UpsampleNetwork, self).__init__() + self.use_causal_conv = use_causal_conv + self.up_layers = torch.nn.ModuleList() + for scale in upsample_scales: + # interpolation layer + stretch = Stretch2d(scale, 1, interpolate_mode) + self.up_layers += [stretch] + + # conv layer + assert (freq_axis_kernel_size - 1) % 2 == 0, "Not support even number freq axis kernel size." + freq_axis_padding = (freq_axis_kernel_size - 1) // 2 + kernel_size = (freq_axis_kernel_size, scale * 2 + 1) + if use_causal_conv: + padding = (freq_axis_padding, scale * 2) + else: + padding = (freq_axis_padding, scale) + conv = Conv2d(1, 1, kernel_size=kernel_size, padding=padding, bias=False) + self.up_layers += [conv] + + # nonlinear + if nonlinear_activation is not None: + nonlinear = getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params) + self.up_layers += [nonlinear] + + def forward(self, c): + """Calculate forward propagation. + + Args: + c : Input tensor (B, C, T). + + Returns: + Tensor: Upsampled tensor (B, C, T'), where T' = T * prod(upsample_scales). 
+ + """ + c = c.unsqueeze(1) # (B, 1, C, T) + for f in self.up_layers: + if self.use_causal_conv and isinstance(f, Conv2d): + c = f(c)[..., :c.size(-1)] + else: + c = f(c) + return c.squeeze(1) # (B, C, T') + + +class ConvInUpsampleNetwork(torch.nn.Module): + """Convolution + upsampling network module.""" + + def __init__(self, + upsample_scales, + nonlinear_activation=None, + nonlinear_activation_params={}, + interpolate_mode="nearest", + freq_axis_kernel_size=1, + aux_channels=80, + aux_context_window=0, + use_causal_conv=False + ): + """Initialize convolution + upsampling network module. + + Args: + upsample_scales (list): List of upsampling scales. + nonlinear_activation (str): Activation function name. + nonlinear_activation_params (dict): Arguments for specified activation function. + mode (str): Interpolation mode. + freq_axis_kernel_size (int): Kernel size in the direction of frequency axis. + aux_channels (int): Number of channels of pre-convolutional layer. + aux_context_window (int): Context window size of the pre-convolutional layer. + use_causal_conv (bool): Whether to use causal structure. + + """ + super(ConvInUpsampleNetwork, self).__init__() + self.aux_context_window = aux_context_window + self.use_causal_conv = use_causal_conv and aux_context_window > 0 + # To capture wide-context information in conditional features + kernel_size = aux_context_window + 1 if use_causal_conv else 2 * aux_context_window + 1 + # NOTE(kan-bayashi): Here do not use padding because the input is already padded + self.conv_in = Conv1d(aux_channels, aux_channels, kernel_size=kernel_size, bias=False) + self.upsample = UpsampleNetwork( + upsample_scales=upsample_scales, + nonlinear_activation=nonlinear_activation, + nonlinear_activation_params=nonlinear_activation_params, + interpolate_mode=interpolate_mode, + freq_axis_kernel_size=freq_axis_kernel_size, + use_causal_conv=use_causal_conv, + ) + + def forward(self, c): + """Calculate forward propagation. + + Args: + c : Input tensor (B, C, T'). + + Returns: + Tensor: Upsampled tensor (B, C, T), + where T = (T' - aux_context_window * 2) * prod(upsample_scales). + + Note: + The length of inputs considers the context window size. + + """ + c_ = self.conv_in(c) + c = c_[:, :, :-self.aux_context_window] if self.use_causal_conv else c_ + return self.upsample(c) diff --git a/NeuralSeq/modules/parallel_wavegan/losses/__init__.py b/NeuralSeq/modules/parallel_wavegan/losses/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b03080a907cb5cb4b316ceb74866ddbc406b33bf --- /dev/null +++ b/NeuralSeq/modules/parallel_wavegan/losses/__init__.py @@ -0,0 +1 @@ +from .stft_loss import * # NOQA diff --git a/NeuralSeq/modules/parallel_wavegan/losses/stft_loss.py b/NeuralSeq/modules/parallel_wavegan/losses/stft_loss.py new file mode 100644 index 0000000000000000000000000000000000000000..74d2aa21ad30ba094c406366e652067462f49cd2 --- /dev/null +++ b/NeuralSeq/modules/parallel_wavegan/losses/stft_loss.py @@ -0,0 +1,153 @@ +# -*- coding: utf-8 -*- + +# Copyright 2019 Tomoki Hayashi +# MIT License (https://opensource.org/licenses/MIT) + +"""STFT-based Loss modules.""" + +import torch +import torch.nn.functional as F + + +def stft(x, fft_size, hop_size, win_length, window): + """Perform STFT and convert to magnitude spectrogram. + + Args: + x (Tensor): Input signal tensor (B, T). + fft_size (int): FFT size. + hop_size (int): Hop size. + win_length (int): Window length. + window (str): Window function type. 
+ + Returns: + Tensor: Magnitude spectrogram (B, #frames, fft_size // 2 + 1). + + """ + x_stft = torch.stft(x, fft_size, hop_size, win_length, window) + real = x_stft[..., 0] + imag = x_stft[..., 1] + + # NOTE(kan-bayashi): clamp is needed to avoid nan or inf + return torch.sqrt(torch.clamp(real ** 2 + imag ** 2, min=1e-7)).transpose(2, 1) + + +class SpectralConvergengeLoss(torch.nn.Module): + """Spectral convergence loss module.""" + + def __init__(self): + """Initilize spectral convergence loss module.""" + super(SpectralConvergengeLoss, self).__init__() + + def forward(self, x_mag, y_mag): + """Calculate forward propagation. + + Args: + x_mag (Tensor): Magnitude spectrogram of predicted signal (B, #frames, #freq_bins). + y_mag (Tensor): Magnitude spectrogram of groundtruth signal (B, #frames, #freq_bins). + + Returns: + Tensor: Spectral convergence loss value. + + """ + return torch.norm(y_mag - x_mag, p="fro") / torch.norm(y_mag, p="fro") + + +class LogSTFTMagnitudeLoss(torch.nn.Module): + """Log STFT magnitude loss module.""" + + def __init__(self): + """Initilize los STFT magnitude loss module.""" + super(LogSTFTMagnitudeLoss, self).__init__() + + def forward(self, x_mag, y_mag): + """Calculate forward propagation. + + Args: + x_mag (Tensor): Magnitude spectrogram of predicted signal (B, #frames, #freq_bins). + y_mag (Tensor): Magnitude spectrogram of groundtruth signal (B, #frames, #freq_bins). + + Returns: + Tensor: Log STFT magnitude loss value. + + """ + return F.l1_loss(torch.log(y_mag), torch.log(x_mag)) + + +class STFTLoss(torch.nn.Module): + """STFT loss module.""" + + def __init__(self, fft_size=1024, shift_size=120, win_length=600, window="hann_window"): + """Initialize STFT loss module.""" + super(STFTLoss, self).__init__() + self.fft_size = fft_size + self.shift_size = shift_size + self.win_length = win_length + self.window = getattr(torch, window)(win_length) + self.spectral_convergenge_loss = SpectralConvergengeLoss() + self.log_stft_magnitude_loss = LogSTFTMagnitudeLoss() + + def forward(self, x, y): + """Calculate forward propagation. + + Args: + x (Tensor): Predicted signal (B, T). + y (Tensor): Groundtruth signal (B, T). + + Returns: + Tensor: Spectral convergence loss value. + Tensor: Log STFT magnitude loss value. + + """ + x_mag = stft(x, self.fft_size, self.shift_size, self.win_length, self.window) + y_mag = stft(y, self.fft_size, self.shift_size, self.win_length, self.window) + sc_loss = self.spectral_convergenge_loss(x_mag, y_mag) + mag_loss = self.log_stft_magnitude_loss(x_mag, y_mag) + + return sc_loss, mag_loss + + +class MultiResolutionSTFTLoss(torch.nn.Module): + """Multi resolution STFT loss module.""" + + def __init__(self, + fft_sizes=[1024, 2048, 512], + hop_sizes=[120, 240, 50], + win_lengths=[600, 1200, 240], + window="hann_window"): + """Initialize Multi resolution STFT loss module. + + Args: + fft_sizes (list): List of FFT sizes. + hop_sizes (list): List of hop sizes. + win_lengths (list): List of window lengths. + window (str): Window function type. + + """ + super(MultiResolutionSTFTLoss, self).__init__() + assert len(fft_sizes) == len(hop_sizes) == len(win_lengths) + self.stft_losses = torch.nn.ModuleList() + for fs, ss, wl in zip(fft_sizes, hop_sizes, win_lengths): + self.stft_losses += [STFTLoss(fs, ss, wl, window)] + + def forward(self, x, y): + """Calculate forward propagation. + + Args: + x (Tensor): Predicted signal (B, T). + y (Tensor): Groundtruth signal (B, T). + + Returns: + Tensor: Multi resolution spectral convergence loss value. 
+ Tensor: Multi resolution log STFT magnitude loss value. + + """ + sc_loss = 0.0 + mag_loss = 0.0 + for f in self.stft_losses: + sc_l, mag_l = f(x, y) + sc_loss += sc_l + mag_loss += mag_l + sc_loss /= len(self.stft_losses) + mag_loss /= len(self.stft_losses) + + return sc_loss, mag_loss diff --git a/NeuralSeq/modules/parallel_wavegan/models/__init__.py b/NeuralSeq/modules/parallel_wavegan/models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4803ba6b2a0afc8022e756ae5b3f4c7403c3c1bd --- /dev/null +++ b/NeuralSeq/modules/parallel_wavegan/models/__init__.py @@ -0,0 +1,2 @@ +from .melgan import * # NOQA +from .parallel_wavegan import * # NOQA diff --git a/NeuralSeq/modules/parallel_wavegan/models/__pycache__/__init__.cpython-37.pyc b/NeuralSeq/modules/parallel_wavegan/models/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..154de3ad4893273bc4d7e59c300914b23b88b3f7 Binary files /dev/null and b/NeuralSeq/modules/parallel_wavegan/models/__pycache__/__init__.cpython-37.pyc differ diff --git a/NeuralSeq/modules/parallel_wavegan/models/__pycache__/__init__.cpython-38.pyc b/NeuralSeq/modules/parallel_wavegan/models/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5aabf169f48b930e57194f0ddeb24f4b859cb9a8 Binary files /dev/null and b/NeuralSeq/modules/parallel_wavegan/models/__pycache__/__init__.cpython-38.pyc differ diff --git a/NeuralSeq/modules/parallel_wavegan/models/__pycache__/melgan.cpython-37.pyc b/NeuralSeq/modules/parallel_wavegan/models/__pycache__/melgan.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..36224cbf680b5f8ebcdbe7097c8be2d81a79445a Binary files /dev/null and b/NeuralSeq/modules/parallel_wavegan/models/__pycache__/melgan.cpython-37.pyc differ diff --git a/NeuralSeq/modules/parallel_wavegan/models/__pycache__/melgan.cpython-38.pyc b/NeuralSeq/modules/parallel_wavegan/models/__pycache__/melgan.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6bad2f707d39804e4f77dc66316cb8e10826a3ff Binary files /dev/null and b/NeuralSeq/modules/parallel_wavegan/models/__pycache__/melgan.cpython-38.pyc differ diff --git a/NeuralSeq/modules/parallel_wavegan/models/__pycache__/parallel_wavegan.cpython-37.pyc b/NeuralSeq/modules/parallel_wavegan/models/__pycache__/parallel_wavegan.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..20398fb6b7cf5f07ba7c51cbdad82139df5d105a Binary files /dev/null and b/NeuralSeq/modules/parallel_wavegan/models/__pycache__/parallel_wavegan.cpython-37.pyc differ diff --git a/NeuralSeq/modules/parallel_wavegan/models/__pycache__/parallel_wavegan.cpython-38.pyc b/NeuralSeq/modules/parallel_wavegan/models/__pycache__/parallel_wavegan.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ba20f2df3839f6d6d7b4d1a7e036c02bb42d3006 Binary files /dev/null and b/NeuralSeq/modules/parallel_wavegan/models/__pycache__/parallel_wavegan.cpython-38.pyc differ diff --git a/NeuralSeq/modules/parallel_wavegan/models/__pycache__/source.cpython-37.pyc b/NeuralSeq/modules/parallel_wavegan/models/__pycache__/source.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..044c31ddf8feee671d36b02a3389d6fc491eac01 Binary files /dev/null and b/NeuralSeq/modules/parallel_wavegan/models/__pycache__/source.cpython-37.pyc differ diff --git 
a/NeuralSeq/modules/parallel_wavegan/models/__pycache__/source.cpython-38.pyc b/NeuralSeq/modules/parallel_wavegan/models/__pycache__/source.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fea523cff82df319854153b4dc1107bd307da508 Binary files /dev/null and b/NeuralSeq/modules/parallel_wavegan/models/__pycache__/source.cpython-38.pyc differ diff --git a/NeuralSeq/modules/parallel_wavegan/models/melgan.py b/NeuralSeq/modules/parallel_wavegan/models/melgan.py new file mode 100644 index 0000000000000000000000000000000000000000..e021ae4817a8c1c97338e61b00b230c881836fd8 --- /dev/null +++ b/NeuralSeq/modules/parallel_wavegan/models/melgan.py @@ -0,0 +1,427 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Tomoki Hayashi +# MIT License (https://opensource.org/licenses/MIT) + +"""MelGAN Modules.""" + +import logging + +import numpy as np +import torch + +from modules.parallel_wavegan.layers import CausalConv1d +from modules.parallel_wavegan.layers import CausalConvTranspose1d +from modules.parallel_wavegan.layers import ResidualStack + + +class MelGANGenerator(torch.nn.Module): + """MelGAN generator module.""" + + def __init__(self, + in_channels=80, + out_channels=1, + kernel_size=7, + channels=512, + bias=True, + upsample_scales=[8, 8, 2, 2], + stack_kernel_size=3, + stacks=3, + nonlinear_activation="LeakyReLU", + nonlinear_activation_params={"negative_slope": 0.2}, + pad="ReflectionPad1d", + pad_params={}, + use_final_nonlinear_activation=True, + use_weight_norm=True, + use_causal_conv=False, + ): + """Initialize MelGANGenerator module. + + Args: + in_channels (int): Number of input channels. + out_channels (int): Number of output channels. + kernel_size (int): Kernel size of initial and final conv layer. + channels (int): Initial number of channels for conv layer. + bias (bool): Whether to add bias parameter in convolution layers. + upsample_scales (list): List of upsampling scales. + stack_kernel_size (int): Kernel size of dilated conv layers in residual stack. + stacks (int): Number of stacks in a single residual stack. + nonlinear_activation (str): Activation function module name. + nonlinear_activation_params (dict): Hyperparameters for activation function. + pad (str): Padding function module name before dilated convolution layer. + pad_params (dict): Hyperparameters for padding function. + use_final_nonlinear_activation (torch.nn.Module): Activation function for the final layer. + use_weight_norm (bool): Whether to use weight norm. + If set to true, it will be applied to all of the conv layers. + use_causal_conv (bool): Whether to use causal convolution. + + """ + super(MelGANGenerator, self).__init__() + + # check hyper parameters is valid + assert channels >= np.prod(upsample_scales) + assert channels % (2 ** len(upsample_scales)) == 0 + if not use_causal_conv: + assert (kernel_size - 1) % 2 == 0, "Not support even number kernel size." 
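These two checks guard the shape bookkeeping of the layer stack built next: each upsampling stage halves the channel count (`channels // (2 ** (i + 1))`) while stretching time by its scale, and the product of `upsample_scales` is the number of waveform samples produced per conditioning frame. With the constructor defaults the arithmetic works out as in the sketch below (illustrative numbers only):

```python
# Channel/time bookkeeping for the default MelGANGenerator settings above
# (channels=512, upsample_scales=[8, 8, 2, 2]); purely illustrative arithmetic.
import numpy as np

channels = 512
upsample_scales = [8, 8, 2, 2]

assert channels >= np.prod(upsample_scales)           # 512 >= 256
assert channels % (2 ** len(upsample_scales)) == 0    # 512 % 16 == 0

total_upsampling = int(np.prod(upsample_scales))      # 256 waveform samples per input frame
for i, scale in enumerate(upsample_scales):
    print(f"stage {i}: {channels // (2 ** i)} -> {channels // (2 ** (i + 1))} channels, x{scale} in time")
print(f"one conditioning frame -> {total_upsampling} waveform samples")
```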
+ + # add initial layer + layers = [] + if not use_causal_conv: + layers += [ + getattr(torch.nn, pad)((kernel_size - 1) // 2, **pad_params), + torch.nn.Conv1d(in_channels, channels, kernel_size, bias=bias), + ] + else: + layers += [ + CausalConv1d(in_channels, channels, kernel_size, + bias=bias, pad=pad, pad_params=pad_params), + ] + + for i, upsample_scale in enumerate(upsample_scales): + # add upsampling layer + layers += [getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params)] + if not use_causal_conv: + layers += [ + torch.nn.ConvTranspose1d( + channels // (2 ** i), + channels // (2 ** (i + 1)), + upsample_scale * 2, + stride=upsample_scale, + padding=upsample_scale // 2 + upsample_scale % 2, + output_padding=upsample_scale % 2, + bias=bias, + ) + ] + else: + layers += [ + CausalConvTranspose1d( + channels // (2 ** i), + channels // (2 ** (i + 1)), + upsample_scale * 2, + stride=upsample_scale, + bias=bias, + ) + ] + + # add residual stack + for j in range(stacks): + layers += [ + ResidualStack( + kernel_size=stack_kernel_size, + channels=channels // (2 ** (i + 1)), + dilation=stack_kernel_size ** j, + bias=bias, + nonlinear_activation=nonlinear_activation, + nonlinear_activation_params=nonlinear_activation_params, + pad=pad, + pad_params=pad_params, + use_causal_conv=use_causal_conv, + ) + ] + + # add final layer + layers += [getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params)] + if not use_causal_conv: + layers += [ + getattr(torch.nn, pad)((kernel_size - 1) // 2, **pad_params), + torch.nn.Conv1d(channels // (2 ** (i + 1)), out_channels, kernel_size, bias=bias), + ] + else: + layers += [ + CausalConv1d(channels // (2 ** (i + 1)), out_channels, kernel_size, + bias=bias, pad=pad, pad_params=pad_params), + ] + if use_final_nonlinear_activation: + layers += [torch.nn.Tanh()] + + # define the model as a single function + self.melgan = torch.nn.Sequential(*layers) + + # apply weight norm + if use_weight_norm: + self.apply_weight_norm() + + # reset parameters + self.reset_parameters() + + def forward(self, c): + """Calculate forward propagation. + + Args: + c (Tensor): Input tensor (B, channels, T). + + Returns: + Tensor: Output tensor (B, 1, T ** prod(upsample_scales)). + + """ + return self.melgan(c) + + def remove_weight_norm(self): + """Remove weight normalization module from all of the layers.""" + def _remove_weight_norm(m): + try: + logging.debug(f"Weight norm is removed from {m}.") + torch.nn.utils.remove_weight_norm(m) + except ValueError: # this module didn't have weight norm + return + + self.apply(_remove_weight_norm) + + def apply_weight_norm(self): + """Apply weight normalization module from all of the layers.""" + def _apply_weight_norm(m): + if isinstance(m, torch.nn.Conv1d) or isinstance(m, torch.nn.ConvTranspose1d): + torch.nn.utils.weight_norm(m) + logging.debug(f"Weight norm is applied to {m}.") + + self.apply(_apply_weight_norm) + + def reset_parameters(self): + """Reset parameters. + + This initialization follows official implementation manner. 
+ https://github.com/descriptinc/melgan-neurips/blob/master/spec2wav/modules.py + + """ + def _reset_parameters(m): + if isinstance(m, torch.nn.Conv1d) or isinstance(m, torch.nn.ConvTranspose1d): + m.weight.data.normal_(0.0, 0.02) + logging.debug(f"Reset parameters in {m}.") + + self.apply(_reset_parameters) + + +class MelGANDiscriminator(torch.nn.Module): + """MelGAN discriminator module.""" + + def __init__(self, + in_channels=1, + out_channels=1, + kernel_sizes=[5, 3], + channels=16, + max_downsample_channels=1024, + bias=True, + downsample_scales=[4, 4, 4, 4], + nonlinear_activation="LeakyReLU", + nonlinear_activation_params={"negative_slope": 0.2}, + pad="ReflectionPad1d", + pad_params={}, + ): + """Initilize MelGAN discriminator module. + + Args: + in_channels (int): Number of input channels. + out_channels (int): Number of output channels. + kernel_sizes (list): List of two kernel sizes. The prod will be used for the first conv layer, + and the first and the second kernel sizes will be used for the last two layers. + For example if kernel_sizes = [5, 3], the first layer kernel size will be 5 * 3 = 15, + the last two layers' kernel size will be 5 and 3, respectively. + channels (int): Initial number of channels for conv layer. + max_downsample_channels (int): Maximum number of channels for downsampling layers. + bias (bool): Whether to add bias parameter in convolution layers. + downsample_scales (list): List of downsampling scales. + nonlinear_activation (str): Activation function module name. + nonlinear_activation_params (dict): Hyperparameters for activation function. + pad (str): Padding function module name before dilated convolution layer. + pad_params (dict): Hyperparameters for padding function. + + """ + super(MelGANDiscriminator, self).__init__() + self.layers = torch.nn.ModuleList() + + # check kernel size is valid + assert len(kernel_sizes) == 2 + assert kernel_sizes[0] % 2 == 1 + assert kernel_sizes[1] % 2 == 1 + + # add first layer + self.layers += [ + torch.nn.Sequential( + getattr(torch.nn, pad)((np.prod(kernel_sizes) - 1) // 2, **pad_params), + torch.nn.Conv1d(in_channels, channels, np.prod(kernel_sizes), bias=bias), + getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params), + ) + ] + + # add downsample layers + in_chs = channels + for downsample_scale in downsample_scales: + out_chs = min(in_chs * downsample_scale, max_downsample_channels) + self.layers += [ + torch.nn.Sequential( + torch.nn.Conv1d( + in_chs, out_chs, + kernel_size=downsample_scale * 10 + 1, + stride=downsample_scale, + padding=downsample_scale * 5, + groups=in_chs // 4, + bias=bias, + ), + getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params), + ) + ] + in_chs = out_chs + + # add final layers + out_chs = min(in_chs * 2, max_downsample_channels) + self.layers += [ + torch.nn.Sequential( + torch.nn.Conv1d( + in_chs, out_chs, kernel_sizes[0], + padding=(kernel_sizes[0] - 1) // 2, + bias=bias, + ), + getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params), + ) + ] + self.layers += [ + torch.nn.Conv1d( + out_chs, out_channels, kernel_sizes[1], + padding=(kernel_sizes[1] - 1) // 2, + bias=bias, + ), + ] + + def forward(self, x): + """Calculate forward propagation. + + Args: + x (Tensor): Input noise signal (B, 1, T). + + Returns: + List: List of output tensors of each layer. 
+ + """ + outs = [] + for f in self.layers: + x = f(x) + outs += [x] + + return outs + + +class MelGANMultiScaleDiscriminator(torch.nn.Module): + """MelGAN multi-scale discriminator module.""" + + def __init__(self, + in_channels=1, + out_channels=1, + scales=3, + downsample_pooling="AvgPool1d", + # follow the official implementation setting + downsample_pooling_params={ + "kernel_size": 4, + "stride": 2, + "padding": 1, + "count_include_pad": False, + }, + kernel_sizes=[5, 3], + channels=16, + max_downsample_channels=1024, + bias=True, + downsample_scales=[4, 4, 4, 4], + nonlinear_activation="LeakyReLU", + nonlinear_activation_params={"negative_slope": 0.2}, + pad="ReflectionPad1d", + pad_params={}, + use_weight_norm=True, + ): + """Initilize MelGAN multi-scale discriminator module. + + Args: + in_channels (int): Number of input channels. + out_channels (int): Number of output channels. + downsample_pooling (str): Pooling module name for downsampling of the inputs. + downsample_pooling_params (dict): Parameters for the above pooling module. + kernel_sizes (list): List of two kernel sizes. The sum will be used for the first conv layer, + and the first and the second kernel sizes will be used for the last two layers. + channels (int): Initial number of channels for conv layer. + max_downsample_channels (int): Maximum number of channels for downsampling layers. + bias (bool): Whether to add bias parameter in convolution layers. + downsample_scales (list): List of downsampling scales. + nonlinear_activation (str): Activation function module name. + nonlinear_activation_params (dict): Hyperparameters for activation function. + pad (str): Padding function module name before dilated convolution layer. + pad_params (dict): Hyperparameters for padding function. + use_causal_conv (bool): Whether to use causal convolution. + + """ + super(MelGANMultiScaleDiscriminator, self).__init__() + self.discriminators = torch.nn.ModuleList() + + # add discriminators + for _ in range(scales): + self.discriminators += [ + MelGANDiscriminator( + in_channels=in_channels, + out_channels=out_channels, + kernel_sizes=kernel_sizes, + channels=channels, + max_downsample_channels=max_downsample_channels, + bias=bias, + downsample_scales=downsample_scales, + nonlinear_activation=nonlinear_activation, + nonlinear_activation_params=nonlinear_activation_params, + pad=pad, + pad_params=pad_params, + ) + ] + self.pooling = getattr(torch.nn, downsample_pooling)(**downsample_pooling_params) + + # apply weight norm + if use_weight_norm: + self.apply_weight_norm() + + # reset parameters + self.reset_parameters() + + def forward(self, x): + """Calculate forward propagation. + + Args: + x (Tensor): Input noise signal (B, 1, T). + + Returns: + List: List of list of each discriminator outputs, which consists of each layer output tensors. 
+ + """ + outs = [] + for f in self.discriminators: + outs += [f(x)] + x = self.pooling(x) + + return outs + + def remove_weight_norm(self): + """Remove weight normalization module from all of the layers.""" + def _remove_weight_norm(m): + try: + logging.debug(f"Weight norm is removed from {m}.") + torch.nn.utils.remove_weight_norm(m) + except ValueError: # this module didn't have weight norm + return + + self.apply(_remove_weight_norm) + + def apply_weight_norm(self): + """Apply weight normalization module from all of the layers.""" + def _apply_weight_norm(m): + if isinstance(m, torch.nn.Conv1d) or isinstance(m, torch.nn.ConvTranspose1d): + torch.nn.utils.weight_norm(m) + logging.debug(f"Weight norm is applied to {m}.") + + self.apply(_apply_weight_norm) + + def reset_parameters(self): + """Reset parameters. + + This initialization follows official implementation manner. + https://github.com/descriptinc/melgan-neurips/blob/master/spec2wav/modules.py + + """ + def _reset_parameters(m): + if isinstance(m, torch.nn.Conv1d) or isinstance(m, torch.nn.ConvTranspose1d): + m.weight.data.normal_(0.0, 0.02) + logging.debug(f"Reset parameters in {m}.") + + self.apply(_reset_parameters) diff --git a/NeuralSeq/modules/parallel_wavegan/models/parallel_wavegan.py b/NeuralSeq/modules/parallel_wavegan/models/parallel_wavegan.py new file mode 100644 index 0000000000000000000000000000000000000000..c63b59f67aa48342179415c1d1beac68574a5498 --- /dev/null +++ b/NeuralSeq/modules/parallel_wavegan/models/parallel_wavegan.py @@ -0,0 +1,434 @@ +# -*- coding: utf-8 -*- + +# Copyright 2019 Tomoki Hayashi +# MIT License (https://opensource.org/licenses/MIT) + +"""Parallel WaveGAN Modules.""" + +import logging +import math + +import torch +from torch import nn + +from modules.parallel_wavegan.layers import Conv1d +from modules.parallel_wavegan.layers import Conv1d1x1 +from modules.parallel_wavegan.layers import ResidualBlock +from modules.parallel_wavegan.layers import upsample +from modules.parallel_wavegan import models + + +class ParallelWaveGANGenerator(torch.nn.Module): + """Parallel WaveGAN Generator module.""" + + def __init__(self, + in_channels=1, + out_channels=1, + kernel_size=3, + layers=30, + stacks=3, + residual_channels=64, + gate_channels=128, + skip_channels=64, + aux_channels=80, + aux_context_window=2, + dropout=0.0, + bias=True, + use_weight_norm=True, + use_causal_conv=False, + upsample_conditional_features=True, + upsample_net="ConvInUpsampleNetwork", + upsample_params={"upsample_scales": [4, 4, 4, 4]}, + use_pitch_embed=False, + ): + """Initialize Parallel WaveGAN Generator module. + + Args: + in_channels (int): Number of input channels. + out_channels (int): Number of output channels. + kernel_size (int): Kernel size of dilated convolution. + layers (int): Number of residual block layers. + stacks (int): Number of stacks i.e., dilation cycles. + residual_channels (int): Number of channels in residual conv. + gate_channels (int): Number of channels in gated conv. + skip_channels (int): Number of channels in skip conv. + aux_channels (int): Number of channels for auxiliary feature conv. + aux_context_window (int): Context window size for auxiliary feature. + dropout (float): Dropout rate. 0.0 means no dropout applied. + bias (bool): Whether to use bias parameter in conv layer. + use_weight_norm (bool): Whether to use weight norm. + If set to true, it will be applied to all of the conv layers. + use_causal_conv (bool): Whether to use causal structure. 
+ upsample_conditional_features (bool): Whether to use upsampling network. + upsample_net (str): Upsampling network architecture. + upsample_params (dict): Upsampling network parameters. + + """ + super(ParallelWaveGANGenerator, self).__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.aux_channels = aux_channels + self.layers = layers + self.stacks = stacks + self.kernel_size = kernel_size + + # check the number of layers and stacks + assert layers % stacks == 0 + layers_per_stack = layers // stacks + + # define first convolution + self.first_conv = Conv1d1x1(in_channels, residual_channels, bias=True) + + # define conv + upsampling network + if upsample_conditional_features: + upsample_params.update({ + "use_causal_conv": use_causal_conv, + }) + if upsample_net == "MelGANGenerator": + assert aux_context_window == 0 + upsample_params.update({ + "use_weight_norm": False, # not to apply twice + "use_final_nonlinear_activation": False, + }) + self.upsample_net = getattr(models, upsample_net)(**upsample_params) + else: + if upsample_net == "ConvInUpsampleNetwork": + upsample_params.update({ + "aux_channels": aux_channels, + "aux_context_window": aux_context_window, + }) + self.upsample_net = getattr(upsample, upsample_net)(**upsample_params) + else: + self.upsample_net = None + + # define residual blocks + self.conv_layers = torch.nn.ModuleList() + for layer in range(layers): + dilation = 2 ** (layer % layers_per_stack) + conv = ResidualBlock( + kernel_size=kernel_size, + residual_channels=residual_channels, + gate_channels=gate_channels, + skip_channels=skip_channels, + aux_channels=aux_channels, + dilation=dilation, + dropout=dropout, + bias=bias, + use_causal_conv=use_causal_conv, + ) + self.conv_layers += [conv] + + # define output layers + self.last_conv_layers = torch.nn.ModuleList([ + torch.nn.ReLU(inplace=True), + Conv1d1x1(skip_channels, skip_channels, bias=True), + torch.nn.ReLU(inplace=True), + Conv1d1x1(skip_channels, out_channels, bias=True), + ]) + + self.use_pitch_embed = use_pitch_embed + if use_pitch_embed: + self.pitch_embed = nn.Embedding(300, aux_channels, 0) + self.c_proj = nn.Linear(2 * aux_channels, aux_channels) + + # apply weight norm + if use_weight_norm: + self.apply_weight_norm() + + def forward(self, x, c=None, pitch=None, **kwargs): + """Calculate forward propagation. + + Args: + x (Tensor): Input noise signal (B, C_in, T). + c (Tensor): Local conditioning auxiliary features (B, C ,T'). + pitch (Tensor): Local conditioning pitch (B, T'). 
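+                Only used when the generator is built with use_pitch_embed=True.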
+ + Returns: + Tensor: Output tensor (B, C_out, T) + + """ + # perform upsampling + if c is not None and self.upsample_net is not None: + if self.use_pitch_embed: + p = self.pitch_embed(pitch) + c = self.c_proj(torch.cat([c.transpose(1, 2), p], -1)).transpose(1, 2) + c = self.upsample_net(c) + assert c.size(-1) == x.size(-1), (c.size(-1), x.size(-1)) + + # encode to hidden representation + x = self.first_conv(x) + skips = 0 + for f in self.conv_layers: + x, h = f(x, c) + skips += h + skips *= math.sqrt(1.0 / len(self.conv_layers)) + + # apply final layers + x = skips + for f in self.last_conv_layers: + x = f(x) + + return x + + def remove_weight_norm(self): + """Remove weight normalization module from all of the layers.""" + def _remove_weight_norm(m): + try: + logging.debug(f"Weight norm is removed from {m}.") + torch.nn.utils.remove_weight_norm(m) + except ValueError: # this module didn't have weight norm + return + + self.apply(_remove_weight_norm) + + def apply_weight_norm(self): + """Apply weight normalization module from all of the layers.""" + def _apply_weight_norm(m): + if isinstance(m, torch.nn.Conv1d) or isinstance(m, torch.nn.Conv2d): + torch.nn.utils.weight_norm(m) + logging.debug(f"Weight norm is applied to {m}.") + + self.apply(_apply_weight_norm) + + @staticmethod + def _get_receptive_field_size(layers, stacks, kernel_size, + dilation=lambda x: 2 ** x): + assert layers % stacks == 0 + layers_per_cycle = layers // stacks + dilations = [dilation(i % layers_per_cycle) for i in range(layers)] + return (kernel_size - 1) * sum(dilations) + 1 + + @property + def receptive_field_size(self): + """Return receptive field size.""" + return self._get_receptive_field_size(self.layers, self.stacks, self.kernel_size) + + +class ParallelWaveGANDiscriminator(torch.nn.Module): + """Parallel WaveGAN Discriminator module.""" + + def __init__(self, + in_channels=1, + out_channels=1, + kernel_size=3, + layers=10, + conv_channels=64, + dilation_factor=1, + nonlinear_activation="LeakyReLU", + nonlinear_activation_params={"negative_slope": 0.2}, + bias=True, + use_weight_norm=True, + ): + """Initialize Parallel WaveGAN Discriminator module. + + Args: + in_channels (int): Number of input channels. + out_channels (int): Number of output channels. + kernel_size (int): Number of output channels. + layers (int): Number of conv layers. + conv_channels (int): Number of chnn layers. + dilation_factor (int): Dilation factor. For example, if dilation_factor = 2, + the dilation will be 2, 4, 8, ..., and so on. + nonlinear_activation (str): Nonlinear function after each conv. + nonlinear_activation_params (dict): Nonlinear function parameters + bias (bool): Whether to use bias parameter in conv. + use_weight_norm (bool) Whether to use weight norm. + If set to true, it will be applied to all of the conv layers. + + """ + super(ParallelWaveGANDiscriminator, self).__init__() + assert (kernel_size - 1) % 2 == 0, "Not support even number kernel size." + assert dilation_factor > 0, "Dilation factor must be > 0." 
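+        # Build a plain stack of non-causal dilated convolutions. With the default
+        # dilation_factor=1 the dilations grow linearly (1, 1, 2, 3, ...); with
+        # dilation_factor > 1 they grow geometrically (1, d, d**2, ...).
+        # padding = (kernel_size - 1) // 2 * dilation keeps the sequence length
+        # unchanged, so the module maps (B, in_channels, T) to (B, out_channels, T).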
+ self.conv_layers = torch.nn.ModuleList() + conv_in_channels = in_channels + for i in range(layers - 1): + if i == 0: + dilation = 1 + else: + dilation = i if dilation_factor == 1 else dilation_factor ** i + conv_in_channels = conv_channels + padding = (kernel_size - 1) // 2 * dilation + conv_layer = [ + Conv1d(conv_in_channels, conv_channels, + kernel_size=kernel_size, padding=padding, + dilation=dilation, bias=bias), + getattr(torch.nn, nonlinear_activation)(inplace=True, **nonlinear_activation_params) + ] + self.conv_layers += conv_layer + padding = (kernel_size - 1) // 2 + last_conv_layer = Conv1d( + conv_in_channels, out_channels, + kernel_size=kernel_size, padding=padding, bias=bias) + self.conv_layers += [last_conv_layer] + + # apply weight norm + if use_weight_norm: + self.apply_weight_norm() + + def forward(self, x): + """Calculate forward propagation. + + Args: + x (Tensor): Input noise signal (B, 1, T). + + Returns: + Tensor: Output tensor (B, 1, T) + + """ + for f in self.conv_layers: + x = f(x) + return x + + def apply_weight_norm(self): + """Apply weight normalization module from all of the layers.""" + def _apply_weight_norm(m): + if isinstance(m, torch.nn.Conv1d) or isinstance(m, torch.nn.Conv2d): + torch.nn.utils.weight_norm(m) + logging.debug(f"Weight norm is applied to {m}.") + + self.apply(_apply_weight_norm) + + def remove_weight_norm(self): + """Remove weight normalization module from all of the layers.""" + def _remove_weight_norm(m): + try: + logging.debug(f"Weight norm is removed from {m}.") + torch.nn.utils.remove_weight_norm(m) + except ValueError: # this module didn't have weight norm + return + + self.apply(_remove_weight_norm) + + +class ResidualParallelWaveGANDiscriminator(torch.nn.Module): + """Parallel WaveGAN Discriminator module.""" + + def __init__(self, + in_channels=1, + out_channels=1, + kernel_size=3, + layers=30, + stacks=3, + residual_channels=64, + gate_channels=128, + skip_channels=64, + dropout=0.0, + bias=True, + use_weight_norm=True, + use_causal_conv=False, + nonlinear_activation="LeakyReLU", + nonlinear_activation_params={"negative_slope": 0.2}, + ): + """Initialize Parallel WaveGAN Discriminator module. + + Args: + in_channels (int): Number of input channels. + out_channels (int): Number of output channels. + kernel_size (int): Kernel size of dilated convolution. + layers (int): Number of residual block layers. + stacks (int): Number of stacks i.e., dilation cycles. + residual_channels (int): Number of channels in residual conv. + gate_channels (int): Number of channels in gated conv. + skip_channels (int): Number of channels in skip conv. + dropout (float): Dropout rate. 0.0 means no dropout applied. + bias (bool): Whether to use bias parameter in conv. + use_weight_norm (bool): Whether to use weight norm. + If set to true, it will be applied to all of the conv layers. + use_causal_conv (bool): Whether to use causal structure. + nonlinear_activation_params (dict): Nonlinear function parameters + + """ + super(ResidualParallelWaveGANDiscriminator, self).__init__() + assert (kernel_size - 1) % 2 == 0, "Not support even number kernel size." 
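+        # Unlike the plain ParallelWaveGANDiscriminator above, this variant reuses
+        # the generator's WaveNet-style ResidualBlock stack, but without auxiliary
+        # (mel) conditioning: the blocks are created with aux_channels=-1 and
+        # forward() calls them with c=None.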
+ + self.in_channels = in_channels + self.out_channels = out_channels + self.layers = layers + self.stacks = stacks + self.kernel_size = kernel_size + + # check the number of layers and stacks + assert layers % stacks == 0 + layers_per_stack = layers // stacks + + # define first convolution + self.first_conv = torch.nn.Sequential( + Conv1d1x1(in_channels, residual_channels, bias=True), + getattr(torch.nn, nonlinear_activation)( + inplace=True, **nonlinear_activation_params), + ) + + # define residual blocks + self.conv_layers = torch.nn.ModuleList() + for layer in range(layers): + dilation = 2 ** (layer % layers_per_stack) + conv = ResidualBlock( + kernel_size=kernel_size, + residual_channels=residual_channels, + gate_channels=gate_channels, + skip_channels=skip_channels, + aux_channels=-1, + dilation=dilation, + dropout=dropout, + bias=bias, + use_causal_conv=use_causal_conv, + ) + self.conv_layers += [conv] + + # define output layers + self.last_conv_layers = torch.nn.ModuleList([ + getattr(torch.nn, nonlinear_activation)( + inplace=True, **nonlinear_activation_params), + Conv1d1x1(skip_channels, skip_channels, bias=True), + getattr(torch.nn, nonlinear_activation)( + inplace=True, **nonlinear_activation_params), + Conv1d1x1(skip_channels, out_channels, bias=True), + ]) + + # apply weight norm + if use_weight_norm: + self.apply_weight_norm() + + def forward(self, x): + """Calculate forward propagation. + + Args: + x (Tensor): Input noise signal (B, 1, T). + + Returns: + Tensor: Output tensor (B, 1, T) + + """ + x = self.first_conv(x) + + skips = 0 + for f in self.conv_layers: + x, h = f(x, None) + skips += h + skips *= math.sqrt(1.0 / len(self.conv_layers)) + + # apply final layers + x = skips + for f in self.last_conv_layers: + x = f(x) + return x + + def apply_weight_norm(self): + """Apply weight normalization module from all of the layers.""" + def _apply_weight_norm(m): + if isinstance(m, torch.nn.Conv1d) or isinstance(m, torch.nn.Conv2d): + torch.nn.utils.weight_norm(m) + logging.debug(f"Weight norm is applied to {m}.") + + self.apply(_apply_weight_norm) + + def remove_weight_norm(self): + """Remove weight normalization module from all of the layers.""" + def _remove_weight_norm(m): + try: + logging.debug(f"Weight norm is removed from {m}.") + torch.nn.utils.remove_weight_norm(m) + except ValueError: # this module didn't have weight norm + return + + self.apply(_remove_weight_norm) diff --git a/NeuralSeq/modules/parallel_wavegan/models/source.py b/NeuralSeq/modules/parallel_wavegan/models/source.py new file mode 100644 index 0000000000000000000000000000000000000000..f2a006e53c0e2194036fd08ea9d6ed4d9a10d6cf --- /dev/null +++ b/NeuralSeq/modules/parallel_wavegan/models/source.py @@ -0,0 +1,538 @@ +import torch +import numpy as np +import sys +import torch.nn.functional as torch_nn_func + + +class SineGen(torch.nn.Module): + """ Definition of sine generator + SineGen(samp_rate, harmonic_num = 0, + sine_amp = 0.1, noise_std = 0.003, + voiced_threshold = 0, + flag_for_pulse=False) + + samp_rate: sampling rate in Hz + harmonic_num: number of harmonic overtones (default 0) + sine_amp: amplitude of sine-wavefrom (default 0.1) + noise_std: std of Gaussian noise (default 0.003) + voiced_thoreshold: F0 threshold for U/V classification (default 0) + flag_for_pulse: this SinGen is used inside PulseGen (default False) + + Note: when flag_for_pulse is True, the first time step of a voiced + segment is always sin(np.pi) or cos(0) + """ + + def __init__(self, samp_rate, harmonic_num=0, + 
sine_amp=0.1, noise_std=0.003, + voiced_threshold=0, + flag_for_pulse=False): + super(SineGen, self).__init__() + self.sine_amp = sine_amp + self.noise_std = noise_std + self.harmonic_num = harmonic_num + self.dim = self.harmonic_num + 1 + self.sampling_rate = samp_rate + self.voiced_threshold = voiced_threshold + self.flag_for_pulse = flag_for_pulse + + def _f02uv(self, f0): + # generate uv signal + uv = torch.ones_like(f0) + uv = uv * (f0 > self.voiced_threshold) + return uv + + def _f02sine(self, f0_values): + """ f0_values: (batchsize, length, dim) + where dim indicates fundamental tone and overtones + """ + # convert to F0 in rad. The interger part n can be ignored + # because 2 * np.pi * n doesn't affect phase + rad_values = (f0_values / self.sampling_rate) % 1 + + # initial phase noise (no noise for fundamental component) + rand_ini = torch.rand(f0_values.shape[0], f0_values.shape[2], \ + device=f0_values.device) + rand_ini[:, 0] = 0 + rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini + + # instantanouse phase sine[t] = sin(2*pi \sum_i=1 ^{t} rad) + if not self.flag_for_pulse: + # for normal case + + # To prevent torch.cumsum numerical overflow, + # it is necessary to add -1 whenever \sum_k=1^n rad_value_k > 1. + # Buffer tmp_over_one_idx indicates the time step to add -1. + # This will not change F0 of sine because (x-1) * 2*pi = x * 2*pi + tmp_over_one = torch.cumsum(rad_values, 1) % 1 + tmp_over_one_idx = (tmp_over_one[:, 1:, :] - + tmp_over_one[:, :-1, :]) < 0 + cumsum_shift = torch.zeros_like(rad_values) + cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0 + + sines = torch.sin(torch.cumsum(rad_values + cumsum_shift, dim=1) + * 2 * np.pi) + else: + # If necessary, make sure that the first time step of every + # voiced segments is sin(pi) or cos(0) + # This is used for pulse-train generation + + # identify the last time step in unvoiced segments + uv = self._f02uv(f0_values) + uv_1 = torch.roll(uv, shifts=-1, dims=1) + uv_1[:, -1, :] = 1 + u_loc = (uv < 1) * (uv_1 > 0) + + # get the instantanouse phase + tmp_cumsum = torch.cumsum(rad_values, dim=1) + # different batch needs to be processed differently + for idx in range(f0_values.shape[0]): + temp_sum = tmp_cumsum[idx, u_loc[idx, :, 0], :] + temp_sum[1:, :] = temp_sum[1:, :] - temp_sum[0:-1, :] + # stores the accumulation of i.phase within + # each voiced segments + tmp_cumsum[idx, :, :] = 0 + tmp_cumsum[idx, u_loc[idx, :, 0], :] = temp_sum + + # rad_values - tmp_cumsum: remove the accumulation of i.phase + # within the previous voiced segment. 
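+            # The cumulative sum below therefore restarts from (roughly) zero just
+            # before each voiced segment, so cos(2 * pi * i_phase) starts near its
+            # peak at the beginning of a voiced segment, which is what PulseGen
+            # needs for pulse-train generation.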
+ i_phase = torch.cumsum(rad_values - tmp_cumsum, dim=1) + + # get the sines + sines = torch.cos(i_phase * 2 * np.pi) + return sines + + def forward(self, f0): + """ sine_tensor, uv = forward(f0) + input F0: tensor(batchsize=1, length, dim=1) + f0 for unvoiced steps should be 0 + output sine_tensor: tensor(batchsize=1, length, dim) + output uv: tensor(batchsize=1, length, 1) + """ + with torch.no_grad(): + f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, + device=f0.device) + # fundamental component + f0_buf[:, :, 0] = f0[:, :, 0] + for idx in np.arange(self.harmonic_num): + # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic + f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * (idx + 2) + + # generate sine waveforms + sine_waves = self._f02sine(f0_buf) * self.sine_amp + + # generate uv signal + # uv = torch.ones(f0.shape) + # uv = uv * (f0 > self.voiced_threshold) + uv = self._f02uv(f0) + + # noise: for unvoiced should be similar to sine_amp + # std = self.sine_amp/3 -> max value ~ self.sine_amp + # . for voiced regions is self.noise_std + noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3 + noise = noise_amp * torch.randn_like(sine_waves) + + # first: set the unvoiced part to 0 by uv + # then: additive noise + sine_waves = sine_waves * uv + noise + return sine_waves, uv, noise + + +class PulseGen(torch.nn.Module): + """ Definition of Pulse train generator + + There are many ways to implement pulse generator. + Here, PulseGen is based on SinGen. For a perfect + """ + def __init__(self, samp_rate, pulse_amp = 0.1, + noise_std = 0.003, voiced_threshold = 0): + super(PulseGen, self).__init__() + self.pulse_amp = pulse_amp + self.sampling_rate = samp_rate + self.voiced_threshold = voiced_threshold + self.noise_std = noise_std + self.l_sinegen = SineGen(self.sampling_rate, harmonic_num=0, \ + sine_amp=self.pulse_amp, noise_std=0, \ + voiced_threshold=self.voiced_threshold, \ + flag_for_pulse=True) + + def forward(self, f0): + """ Pulse train generator + pulse_train, uv = forward(f0) + input F0: tensor(batchsize=1, length, dim=1) + f0 for unvoiced steps should be 0 + output pulse_train: tensor(batchsize=1, length, dim) + output uv: tensor(batchsize=1, length, 1) + + Note: self.l_sine doesn't make sure that the initial phase of + a voiced segment is np.pi, the first pulse in a voiced segment + may not be at the first time step within a voiced segment + """ + with torch.no_grad(): + sine_wav, uv, noise = self.l_sinegen(f0) + + # sine without additive noise + pure_sine = sine_wav - noise + + # step t corresponds to a pulse if + # sine[t] > sine[t+1] & sine[t] > sine[t-1] + # & sine[t-1], sine[t+1], and sine[t] are voiced + # or + # sine[t] is voiced, sine[t-1] is unvoiced + # we use torch.roll to simulate sine[t+1] and sine[t-1] + sine_1 = torch.roll(pure_sine, shifts=1, dims=1) + uv_1 = torch.roll(uv, shifts=1, dims=1) + uv_1[:, 0, :] = 0 + sine_2 = torch.roll(pure_sine, shifts=-1, dims=1) + uv_2 = torch.roll(uv, shifts=-1, dims=1) + uv_2[:, -1, :] = 0 + + loc = (pure_sine > sine_1) * (pure_sine > sine_2) \ + * (uv_1 > 0) * (uv_2 > 0) * (uv > 0) \ + + (uv_1 < 1) * (uv > 0) + + # pulse train without noise + pulse_train = pure_sine * loc + + # additive noise to pulse train + # note that noise from sinegen is zero in voiced regions + pulse_noise = torch.randn_like(pure_sine) * self.noise_std + + # with additive noise on pulse, and unvoiced regions + pulse_train += pulse_noise * loc + pulse_noise * (1 - uv) + return pulse_train, sine_wav, uv, pulse_noise + + +class 
SignalsConv1d(torch.nn.Module): + """ Filtering input signal with time invariant filter + Note: FIRFilter conducted filtering given fixed FIR weight + SignalsConv1d convolves two signals + Note: this is based on torch.nn.functional.conv1d + + """ + + def __init__(self): + super(SignalsConv1d, self).__init__() + + def forward(self, signal, system_ir): + """ output = forward(signal, system_ir) + + signal: (batchsize, length1, dim) + system_ir: (length2, dim) + + output: (batchsize, length1, dim) + """ + if signal.shape[-1] != system_ir.shape[-1]: + print("Error: SignalsConv1d expects shape:") + print("signal (batchsize, length1, dim)") + print("system_id (batchsize, length2, dim)") + print("But received signal: {:s}".format(str(signal.shape))) + print(" system_ir: {:s}".format(str(system_ir.shape))) + sys.exit(1) + padding_length = system_ir.shape[0] - 1 + groups = signal.shape[-1] + + # pad signal on the left + signal_pad = torch_nn_func.pad(signal.permute(0, 2, 1), \ + (padding_length, 0)) + # prepare system impulse response as (dim, 1, length2) + # also flip the impulse response + ir = torch.flip(system_ir.unsqueeze(1).permute(2, 1, 0), \ + dims=[2]) + # convolute + output = torch_nn_func.conv1d(signal_pad, ir, groups=groups) + return output.permute(0, 2, 1) + + +class CyclicNoiseGen_v1(torch.nn.Module): + """ CyclicnoiseGen_v1 + Cyclic noise with a single parameter of beta. + Pytorch v1 implementation assumes f_t is also fixed + """ + + def __init__(self, samp_rate, + noise_std=0.003, voiced_threshold=0): + super(CyclicNoiseGen_v1, self).__init__() + self.samp_rate = samp_rate + self.noise_std = noise_std + self.voiced_threshold = voiced_threshold + + self.l_pulse = PulseGen(samp_rate, pulse_amp=1.0, + noise_std=noise_std, + voiced_threshold=voiced_threshold) + self.l_conv = SignalsConv1d() + + def noise_decay(self, beta, f0mean): + """ decayed_noise = noise_decay(beta, f0mean) + decayed_noise = n[t]exp(-t * f_mean / beta / samp_rate) + + beta: (dim=1) or (batchsize=1, 1, dim=1) + f0mean (batchsize=1, 1, dim=1) + + decayed_noise (batchsize=1, length, dim=1) + """ + with torch.no_grad(): + # exp(-1.0 n / T) < 0.01 => n > -log(0.01)*T = 4.60*T + # truncate the noise when decayed by -40 dB + length = 4.6 * self.samp_rate / f0mean + length = length.int() + time_idx = torch.arange(0, length, device=beta.device) + time_idx = time_idx.unsqueeze(0).unsqueeze(2) + time_idx = time_idx.repeat(beta.shape[0], 1, beta.shape[2]) + + noise = torch.randn(time_idx.shape, device=beta.device) + + # due to Pytorch implementation, use f0_mean as the f0 factor + decay = torch.exp(-time_idx * f0mean / beta / self.samp_rate) + return noise * self.noise_std * decay + + def forward(self, f0s, beta): + """ Producde cyclic-noise + """ + # pulse train + pulse_train, sine_wav, uv, noise = self.l_pulse(f0s) + pure_pulse = pulse_train - noise + + # decayed_noise (length, dim=1) + if (uv < 1).all(): + # all unvoiced + cyc_noise = torch.zeros_like(sine_wav) + else: + f0mean = f0s[uv > 0].mean() + + decayed_noise = self.noise_decay(beta, f0mean)[0, :, :] + # convolute + cyc_noise = self.l_conv(pure_pulse, decayed_noise) + + # add noise in invoiced segments + cyc_noise = cyc_noise + noise * (1.0 - uv) + return cyc_noise, pulse_train, sine_wav, uv, noise + + +class SineGen(torch.nn.Module): + """ Definition of sine generator + SineGen(samp_rate, harmonic_num = 0, + sine_amp = 0.1, noise_std = 0.003, + voiced_threshold = 0, + flag_for_pulse=False) + + samp_rate: sampling rate in Hz + harmonic_num: number of harmonic overtones 
(default 0) + sine_amp: amplitude of sine-wavefrom (default 0.1) + noise_std: std of Gaussian noise (default 0.003) + voiced_thoreshold: F0 threshold for U/V classification (default 0) + flag_for_pulse: this SinGen is used inside PulseGen (default False) + + Note: when flag_for_pulse is True, the first time step of a voiced + segment is always sin(np.pi) or cos(0) + """ + + def __init__(self, samp_rate, harmonic_num=0, + sine_amp=0.1, noise_std=0.003, + voiced_threshold=0, + flag_for_pulse=False): + super(SineGen, self).__init__() + self.sine_amp = sine_amp + self.noise_std = noise_std + self.harmonic_num = harmonic_num + self.dim = self.harmonic_num + 1 + self.sampling_rate = samp_rate + self.voiced_threshold = voiced_threshold + self.flag_for_pulse = flag_for_pulse + + def _f02uv(self, f0): + # generate uv signal + uv = torch.ones_like(f0) + uv = uv * (f0 > self.voiced_threshold) + return uv + + def _f02sine(self, f0_values): + """ f0_values: (batchsize, length, dim) + where dim indicates fundamental tone and overtones + """ + # convert to F0 in rad. The interger part n can be ignored + # because 2 * np.pi * n doesn't affect phase + rad_values = (f0_values / self.sampling_rate) % 1 + + # initial phase noise (no noise for fundamental component) + rand_ini = torch.rand(f0_values.shape[0], f0_values.shape[2], \ + device=f0_values.device) + rand_ini[:, 0] = 0 + rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini + + # instantanouse phase sine[t] = sin(2*pi \sum_i=1 ^{t} rad) + if not self.flag_for_pulse: + # for normal case + + # To prevent torch.cumsum numerical overflow, + # it is necessary to add -1 whenever \sum_k=1^n rad_value_k > 1. + # Buffer tmp_over_one_idx indicates the time step to add -1. + # This will not change F0 of sine because (x-1) * 2*pi = x * 2*pi + tmp_over_one = torch.cumsum(rad_values, 1) % 1 + tmp_over_one_idx = (tmp_over_one[:, 1:, :] - + tmp_over_one[:, :-1, :]) < 0 + cumsum_shift = torch.zeros_like(rad_values) + cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0 + + sines = torch.sin(torch.cumsum(rad_values + cumsum_shift, dim=1) + * 2 * np.pi) + else: + # If necessary, make sure that the first time step of every + # voiced segments is sin(pi) or cos(0) + # This is used for pulse-train generation + + # identify the last time step in unvoiced segments + uv = self._f02uv(f0_values) + uv_1 = torch.roll(uv, shifts=-1, dims=1) + uv_1[:, -1, :] = 1 + u_loc = (uv < 1) * (uv_1 > 0) + + # get the instantanouse phase + tmp_cumsum = torch.cumsum(rad_values, dim=1) + # different batch needs to be processed differently + for idx in range(f0_values.shape[0]): + temp_sum = tmp_cumsum[idx, u_loc[idx, :, 0], :] + temp_sum[1:, :] = temp_sum[1:, :] - temp_sum[0:-1, :] + # stores the accumulation of i.phase within + # each voiced segments + tmp_cumsum[idx, :, :] = 0 + tmp_cumsum[idx, u_loc[idx, :, 0], :] = temp_sum + + # rad_values - tmp_cumsum: remove the accumulation of i.phase + # within the previous voiced segment. 
+ i_phase = torch.cumsum(rad_values - tmp_cumsum, dim=1) + + # get the sines + sines = torch.cos(i_phase * 2 * np.pi) + return sines + + def forward(self, f0): + """ sine_tensor, uv = forward(f0) + input F0: tensor(batchsize=1, length, dim=1) + f0 for unvoiced steps should be 0 + output sine_tensor: tensor(batchsize=1, length, dim) + output uv: tensor(batchsize=1, length, 1) + """ + with torch.no_grad(): + f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, \ + device=f0.device) + # fundamental component + f0_buf[:, :, 0] = f0[:, :, 0] + for idx in np.arange(self.harmonic_num): + # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic + f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * (idx + 2) + + # generate sine waveforms + sine_waves = self._f02sine(f0_buf) * self.sine_amp + + # generate uv signal + # uv = torch.ones(f0.shape) + # uv = uv * (f0 > self.voiced_threshold) + uv = self._f02uv(f0) + + # noise: for unvoiced should be similar to sine_amp + # std = self.sine_amp/3 -> max value ~ self.sine_amp + # . for voiced regions is self.noise_std + noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3 + noise = noise_amp * torch.randn_like(sine_waves) + + # first: set the unvoiced part to 0 by uv + # then: additive noise + sine_waves = sine_waves * uv + noise + return sine_waves, uv, noise + + +class SourceModuleCycNoise_v1(torch.nn.Module): + """ SourceModuleCycNoise_v1 + SourceModule(sampling_rate, noise_std=0.003, voiced_threshod=0) + sampling_rate: sampling_rate in Hz + + noise_std: std of Gaussian noise (default: 0.003) + voiced_threshold: threshold to set U/V given F0 (default: 0) + + cyc, noise, uv = SourceModuleCycNoise_v1(F0_upsampled, beta) + F0_upsampled (batchsize, length, 1) + beta (1) + cyc (batchsize, length, 1) + noise (batchsize, length, 1) + uv (batchsize, length, 1) + """ + + def __init__(self, sampling_rate, noise_std=0.003, voiced_threshod=0): + super(SourceModuleCycNoise_v1, self).__init__() + self.sampling_rate = sampling_rate + self.noise_std = noise_std + self.l_cyc_gen = CyclicNoiseGen_v1(sampling_rate, noise_std, + voiced_threshod) + + def forward(self, f0_upsamped, beta): + """ + cyc, noise, uv = SourceModuleCycNoise_v1(F0, beta) + F0_upsampled (batchsize, length, 1) + beta (1) + cyc (batchsize, length, 1) + noise (batchsize, length, 1) + uv (batchsize, length, 1) + """ + # source for harmonic branch + cyc, pulse, sine, uv, add_noi = self.l_cyc_gen(f0_upsamped, beta) + + # source for noise branch, in the same shape as uv + noise = torch.randn_like(uv) * self.noise_std / 3 + return cyc, noise, uv + + +class SourceModuleHnNSF(torch.nn.Module): + """ SourceModule for hn-nsf + SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1, + add_noise_std=0.003, voiced_threshod=0) + sampling_rate: sampling_rate in Hz + harmonic_num: number of harmonic above F0 (default: 0) + sine_amp: amplitude of sine source signal (default: 0.1) + add_noise_std: std of additive Gaussian noise (default: 0.003) + note that amplitude of noise in unvoiced is decided + by sine_amp + voiced_threshold: threhold to set U/V given F0 (default: 0) + + Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) + F0_sampled (batchsize, length, 1) + Sine_source (batchsize, length, 1) + noise_source (batchsize, length 1) + uv (batchsize, length, 1) + """ + + def __init__(self, sampling_rate, harmonic_num=0, sine_amp=0.1, + add_noise_std=0.003, voiced_threshod=0): + super(SourceModuleHnNSF, self).__init__() + + self.sine_amp = sine_amp + self.noise_std = add_noise_std + + # to produce sine 
waveforms + self.l_sin_gen = SineGen(sampling_rate, harmonic_num, + sine_amp, add_noise_std, voiced_threshod) + + # to merge source harmonics into a single excitation + self.l_linear = torch.nn.Linear(harmonic_num + 1, 1) + self.l_tanh = torch.nn.Tanh() + + def forward(self, x): + """ + Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) + F0_sampled (batchsize, length, 1) + Sine_source (batchsize, length, 1) + noise_source (batchsize, length 1) + """ + # source for harmonic branch + sine_wavs, uv, _ = self.l_sin_gen(x) + sine_merge = self.l_tanh(self.l_linear(sine_wavs)) + + # source for noise branch, in the same shape as uv + noise = torch.randn_like(uv) * self.sine_amp / 3 + return sine_merge, noise, uv + + +if __name__ == '__main__': + source = SourceModuleCycNoise_v1(24000) + x = torch.randn(16, 25600, 1) + + diff --git a/NeuralSeq/modules/parallel_wavegan/optimizers/__init__.py b/NeuralSeq/modules/parallel_wavegan/optimizers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a0e0c5932838281e912079e5784d84d43444a61a --- /dev/null +++ b/NeuralSeq/modules/parallel_wavegan/optimizers/__init__.py @@ -0,0 +1,2 @@ +from torch.optim import * # NOQA +from .radam import * # NOQA diff --git a/NeuralSeq/modules/parallel_wavegan/optimizers/radam.py b/NeuralSeq/modules/parallel_wavegan/optimizers/radam.py new file mode 100644 index 0000000000000000000000000000000000000000..e805d7e34921bee436e1e7fd9e1f753c7609186b --- /dev/null +++ b/NeuralSeq/modules/parallel_wavegan/optimizers/radam.py @@ -0,0 +1,91 @@ +# -*- coding: utf-8 -*- + +"""RAdam optimizer. + +This code is drived from https://github.com/LiyuanLucasLiu/RAdam. +""" + +import math +import torch + +from torch.optim.optimizer import Optimizer + + +class RAdam(Optimizer): + """Rectified Adam optimizer.""" + + def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0): + """Initilize RAdam optimizer.""" + defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) + self.buffer = [[None, None, None] for ind in range(10)] + super(RAdam, self).__init__(params, defaults) + + def __setstate__(self, state): + """Set state.""" + super(RAdam, self).__setstate__(state) + + def step(self, closure=None): + """Run one step.""" + loss = None + if closure is not None: + loss = closure() + + for group in self.param_groups: + + for p in group['params']: + if p.grad is None: + continue + grad = p.grad.data.float() + if grad.is_sparse: + raise RuntimeError('RAdam does not support sparse gradients') + + p_data_fp32 = p.data.float() + + state = self.state[p] + + if len(state) == 0: + state['step'] = 0 + state['exp_avg'] = torch.zeros_like(p_data_fp32) + state['exp_avg_sq'] = torch.zeros_like(p_data_fp32) + else: + state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32) + state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32) + + exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] + beta1, beta2 = group['betas'] + + exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad) + exp_avg.mul_(beta1).add_(1 - beta1, grad) + + state['step'] += 1 + buffered = self.buffer[int(state['step'] % 10)] + if state['step'] == buffered[0]: + N_sma, step_size = buffered[1], buffered[2] + else: + buffered[0] = state['step'] + beta2_t = beta2 ** state['step'] + N_sma_max = 2 / (1 - beta2) - 1 + N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t) + buffered[1] = N_sma + + # more conservative since it's an approximated value + if N_sma >= 5: + step_size = math.sqrt( + (1 - beta2_t) * (N_sma 
- 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (N_sma_max - 2)) / (1 - beta1 ** state['step']) # NOQA + else: + step_size = 1.0 / (1 - beta1 ** state['step']) + buffered[2] = step_size + + if group['weight_decay'] != 0: + p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32) + + # more conservative since it's an approximated value + if N_sma >= 5: + denom = exp_avg_sq.sqrt().add_(group['eps']) + p_data_fp32.addcdiv_(-step_size * group['lr'], exp_avg, denom) + else: + p_data_fp32.add_(-step_size * group['lr'], exp_avg) + + p.data.copy_(p_data_fp32) + + return loss diff --git a/NeuralSeq/modules/parallel_wavegan/stft_loss.py b/NeuralSeq/modules/parallel_wavegan/stft_loss.py new file mode 100644 index 0000000000000000000000000000000000000000..229e6c777dc9ec7f710842d1e648dba1189ec8b4 --- /dev/null +++ b/NeuralSeq/modules/parallel_wavegan/stft_loss.py @@ -0,0 +1,100 @@ +# -*- coding: utf-8 -*- + +# Copyright 2019 Tomoki Hayashi +# MIT License (https://opensource.org/licenses/MIT) + +"""STFT-based Loss modules.""" +import librosa +import torch + +from modules.parallel_wavegan.losses import LogSTFTMagnitudeLoss, SpectralConvergengeLoss, stft + + +class STFTLoss(torch.nn.Module): + """STFT loss module.""" + + def __init__(self, fft_size=1024, shift_size=120, win_length=600, window="hann_window", + use_mel_loss=False): + """Initialize STFT loss module.""" + super(STFTLoss, self).__init__() + self.fft_size = fft_size + self.shift_size = shift_size + self.win_length = win_length + self.window = getattr(torch, window)(win_length) + self.spectral_convergenge_loss = SpectralConvergengeLoss() + self.log_stft_magnitude_loss = LogSTFTMagnitudeLoss() + self.use_mel_loss = use_mel_loss + self.mel_basis = None + + def forward(self, x, y): + """Calculate forward propagation. + + Args: + x (Tensor): Predicted signal (B, T). + y (Tensor): Groundtruth signal (B, T). + + Returns: + Tensor: Spectral convergence loss value. + Tensor: Log STFT magnitude loss value. + + """ + x_mag = stft(x, self.fft_size, self.shift_size, self.win_length, self.window) + y_mag = stft(y, self.fft_size, self.shift_size, self.win_length, self.window) + if self.use_mel_loss: + if self.mel_basis is None: + self.mel_basis = torch.from_numpy(librosa.filters.mel(22050, self.fft_size, 80)).cuda().T + x_mag = x_mag @ self.mel_basis + y_mag = y_mag @ self.mel_basis + + sc_loss = self.spectral_convergenge_loss(x_mag, y_mag) + mag_loss = self.log_stft_magnitude_loss(x_mag, y_mag) + + return sc_loss, mag_loss + + +class MultiResolutionSTFTLoss(torch.nn.Module): + """Multi resolution STFT loss module.""" + + def __init__(self, + fft_sizes=[1024, 2048, 512], + hop_sizes=[120, 240, 50], + win_lengths=[600, 1200, 240], + window="hann_window", + use_mel_loss=False): + """Initialize Multi resolution STFT loss module. + + Args: + fft_sizes (list): List of FFT sizes. + hop_sizes (list): List of hop sizes. + win_lengths (list): List of window lengths. + window (str): Window function type. + + """ + super(MultiResolutionSTFTLoss, self).__init__() + assert len(fft_sizes) == len(hop_sizes) == len(win_lengths) + self.stft_losses = torch.nn.ModuleList() + for fs, ss, wl in zip(fft_sizes, hop_sizes, win_lengths): + self.stft_losses += [STFTLoss(fs, ss, wl, window, use_mel_loss)] + + def forward(self, x, y): + """Calculate forward propagation. + + Args: + x (Tensor): Predicted signal (B, T). + y (Tensor): Groundtruth signal (B, T). + + Returns: + Tensor: Multi resolution spectral convergence loss value. 
+ Tensor: Multi resolution log STFT magnitude loss value. + + """ + sc_loss = 0.0 + mag_loss = 0.0 + for f in self.stft_losses: + sc_l, mag_l = f(x, y) + sc_loss += sc_l + mag_loss += mag_l + sc_loss /= len(self.stft_losses) + mag_loss /= len(self.stft_losses) + + return sc_loss, mag_loss diff --git a/NeuralSeq/modules/parallel_wavegan/utils/__init__.py b/NeuralSeq/modules/parallel_wavegan/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e8fa95a020706b5412c3959fbf6e5980019c0d5f --- /dev/null +++ b/NeuralSeq/modules/parallel_wavegan/utils/__init__.py @@ -0,0 +1 @@ +from .utils import * # NOQA diff --git a/NeuralSeq/modules/parallel_wavegan/utils/__pycache__/__init__.cpython-37.pyc b/NeuralSeq/modules/parallel_wavegan/utils/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..169bc970f37a72b64bdfff8b12c4d7d3e3546058 Binary files /dev/null and b/NeuralSeq/modules/parallel_wavegan/utils/__pycache__/__init__.cpython-37.pyc differ diff --git a/NeuralSeq/modules/parallel_wavegan/utils/__pycache__/__init__.cpython-38.pyc b/NeuralSeq/modules/parallel_wavegan/utils/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..64a92a1e5741f67fde6c5fe26bc881931721eabd Binary files /dev/null and b/NeuralSeq/modules/parallel_wavegan/utils/__pycache__/__init__.cpython-38.pyc differ diff --git a/NeuralSeq/modules/parallel_wavegan/utils/__pycache__/utils.cpython-37.pyc b/NeuralSeq/modules/parallel_wavegan/utils/__pycache__/utils.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b9e12811faed2d42ea2021702c74e1b187e9fc07 Binary files /dev/null and b/NeuralSeq/modules/parallel_wavegan/utils/__pycache__/utils.cpython-37.pyc differ diff --git a/NeuralSeq/modules/parallel_wavegan/utils/__pycache__/utils.cpython-38.pyc b/NeuralSeq/modules/parallel_wavegan/utils/__pycache__/utils.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..66dcd938198b5d4fbb37dc9b6b550926d2650d9d Binary files /dev/null and b/NeuralSeq/modules/parallel_wavegan/utils/__pycache__/utils.cpython-38.pyc differ diff --git a/NeuralSeq/modules/parallel_wavegan/utils/utils.py b/NeuralSeq/modules/parallel_wavegan/utils/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..d48a5ed28e8555d4b8cfb15fdee86426bbb9e368 --- /dev/null +++ b/NeuralSeq/modules/parallel_wavegan/utils/utils.py @@ -0,0 +1,169 @@ +# -*- coding: utf-8 -*- + +# Copyright 2019 Tomoki Hayashi +# MIT License (https://opensource.org/licenses/MIT) + +"""Utility functions.""" + +import fnmatch +import logging +import os +import sys + +import h5py +import numpy as np + + +def find_files(root_dir, query="*.wav", include_root_dir=True): + """Find files recursively. + + Args: + root_dir (str): Root root_dir to find. + query (str): Query to find. + include_root_dir (bool): If False, root_dir name is not included. + + Returns: + list: List of found filenames. + + """ + files = [] + for root, dirnames, filenames in os.walk(root_dir, followlinks=True): + for filename in fnmatch.filter(filenames, query): + files.append(os.path.join(root, filename)) + if not include_root_dir: + files = [file_.replace(root_dir + "/", "") for file_ in files] + + return files + + +def read_hdf5(hdf5_name, hdf5_path): + """Read hdf5 dataset. + + Args: + hdf5_name (str): Filename of hdf5 file. + hdf5_path (str): Dataset name in hdf5 file. + + Return: + any: Dataset values. 
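+
+    Example (file name and dataset path are hypothetical):
+        >>> feats = read_hdf5("dump/sample.h5", "feats")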
+ + """ + if not os.path.exists(hdf5_name): + logging.error(f"There is no such a hdf5 file ({hdf5_name}).") + sys.exit(1) + + hdf5_file = h5py.File(hdf5_name, "r") + + if hdf5_path not in hdf5_file: + logging.error(f"There is no such a data in hdf5 file. ({hdf5_path})") + sys.exit(1) + + hdf5_data = hdf5_file[hdf5_path][()] + hdf5_file.close() + + return hdf5_data + + +def write_hdf5(hdf5_name, hdf5_path, write_data, is_overwrite=True): + """Write dataset to hdf5. + + Args: + hdf5_name (str): Hdf5 dataset filename. + hdf5_path (str): Dataset path in hdf5. + write_data (ndarray): Data to write. + is_overwrite (bool): Whether to overwrite dataset. + + """ + # convert to numpy array + write_data = np.array(write_data) + + # check folder existence + folder_name, _ = os.path.split(hdf5_name) + if not os.path.exists(folder_name) and len(folder_name) != 0: + os.makedirs(folder_name) + + # check hdf5 existence + if os.path.exists(hdf5_name): + # if already exists, open with r+ mode + hdf5_file = h5py.File(hdf5_name, "r+") + # check dataset existence + if hdf5_path in hdf5_file: + if is_overwrite: + logging.warning("Dataset in hdf5 file already exists. " + "recreate dataset in hdf5.") + hdf5_file.__delitem__(hdf5_path) + else: + logging.error("Dataset in hdf5 file already exists. " + "if you want to overwrite, please set is_overwrite = True.") + hdf5_file.close() + sys.exit(1) + else: + # if not exists, open with w mode + hdf5_file = h5py.File(hdf5_name, "w") + + # write data to hdf5 + hdf5_file.create_dataset(hdf5_path, data=write_data) + hdf5_file.flush() + hdf5_file.close() + + +class HDF5ScpLoader(object): + """Loader class for a fests.scp file of hdf5 file. + + Examples: + key1 /some/path/a.h5:feats + key2 /some/path/b.h5:feats + key3 /some/path/c.h5:feats + key4 /some/path/d.h5:feats + ... + >>> loader = HDF5ScpLoader("hdf5.scp") + >>> array = loader["key1"] + + key1 /some/path/a.h5 + key2 /some/path/b.h5 + key3 /some/path/c.h5 + key4 /some/path/d.h5 + ... + >>> loader = HDF5ScpLoader("hdf5.scp", "feats") + >>> array = loader["key1"] + + """ + + def __init__(self, feats_scp, default_hdf5_path="feats"): + """Initialize HDF5 scp loader. + + Args: + feats_scp (str): Kaldi-style feats.scp file with hdf5 format. + default_hdf5_path (str): Path in hdf5 file. If the scp contain the info, not used. 
+ + """ + self.default_hdf5_path = default_hdf5_path + with open(feats_scp) as f: + lines = [line.replace("\n", "") for line in f.readlines()] + self.data = {} + for line in lines: + key, value = line.split() + self.data[key] = value + + def get_path(self, key): + """Get hdf5 file path for a given key.""" + return self.data[key] + + def __getitem__(self, key): + """Get ndarray for a given key.""" + p = self.data[key] + if ":" in p: + return read_hdf5(*p.split(":")) + else: + return read_hdf5(p, self.default_hdf5_path) + + def __len__(self): + """Return the length of the scp file.""" + return len(self.data) + + def __iter__(self): + """Return the iterator of the scp file.""" + return iter(self.data) + + def keys(self): + """Return the keys of the scp file.""" + return self.data.keys() diff --git a/NeuralSeq/modules/portaspeech/fs.py b/NeuralSeq/modules/portaspeech/fs.py new file mode 100644 index 0000000000000000000000000000000000000000..477cc6501bc700367eb43f17be4a7bf5822b6a1e --- /dev/null +++ b/NeuralSeq/modules/portaspeech/fs.py @@ -0,0 +1,188 @@ +from utils.hparams import hparams +import torch +from torch import nn +import torch.nn.functional as F +from modules.commons.conv import TextConvEncoder, ConvBlocks +from modules.commons.common_layers import Embedding +from modules.fastspeech.tts_modules import LayerNorm, PitchPredictor, LengthRegulator +from modules.commons.rel_transformer import RelTransformerEncoder, BERTRelTransformerEncoder +from modules.commons.align_ops import clip_mel2token_to_multiple, expand_states +from utils.pitch_utils import denorm_f0, f0_to_coarse + +FS_ENCODERS = { + 'rel_fft': lambda hp, dict: RelTransformerEncoder( + len(dict), hp['hidden_size'], hp['hidden_size'], + hp['ffn_hidden_size'], hp['num_heads'], hp['enc_layers'], + hp['enc_ffn_kernel_size'], hp['dropout'], prenet=hp['enc_prenet'], pre_ln=hp['enc_pre_ln']), +} + +FS_DECODERS = { + 'conv': lambda hp: ConvBlocks(hp['hidden_size'], hp['hidden_size'], hp['dec_dilations'], + hp['dec_kernel_size'], layers_in_block=hp['layers_in_block'], + norm_type=hp['enc_dec_norm'], dropout=hp['dropout'], + post_net_kernel=hp.get('dec_post_net_kernel', 3)), +} + +class DurationPredictor(torch.nn.Module): + def __init__(self, idim, n_layers=2, n_chans=384, kernel_size=3, dropout_rate=0.1, offset=1.0): + super(DurationPredictor, self).__init__() + self.offset = offset + self.conv = torch.nn.ModuleList() + self.kernel_size = kernel_size + for idx in range(n_layers): + in_chans = idim if idx == 0 else n_chans + self.conv += [torch.nn.Sequential( + torch.nn.Conv1d(in_chans, n_chans, kernel_size, stride=1, padding=kernel_size // 2), + torch.nn.ReLU(), + LayerNorm(n_chans, dim=1), + torch.nn.Dropout(dropout_rate) + )] + self.linear = nn.Sequential(torch.nn.Linear(n_chans, 1), nn.Softplus()) + + def forward(self, x, x_padding=None): + x = x.transpose(1, -1) # (B, idim, Tmax) + for f in self.conv: + x = f(x) # (B, C, Tmax) + if x_padding is not None: + x = x * (1 - x_padding.float())[:, None, :] + + x = self.linear(x.transpose(1, -1)) # [B, T, C] + x = x * (1 - x_padding.float())[:, :, None] # (B, T, C) + x = x[..., 0] # (B, Tmax) + return x + +class FastSpeech(nn.Module): + def __init__(self, dict_size, out_dims=None): + super().__init__() + self.enc_layers = hparams['enc_layers'] + self.dec_layers = hparams['dec_layers'] + self.hidden_size = hparams['hidden_size'] + if hparams.get("use_bert") is True: + self.ph_encoder = BERTRelTransformerEncoder(dict_size, hparams['hidden_size'], hparams['hidden_size'], + 
hparams['ffn_hidden_size'], hparams['num_heads'], hparams['enc_layers'], + hparams['enc_ffn_kernel_size'], hparams['dropout'], prenet=hparams['enc_prenet'], pre_ln=hparams['enc_pre_ln']) + else: + self.ph_encoder = FS_ENCODERS[hparams['encoder_type']](hparams, dict_size) + self.decoder = FS_DECODERS[hparams['decoder_type']](hparams) + self.out_dims = hparams['audio_num_mel_bins'] if out_dims is None else out_dims + self.mel_out = nn.Linear(self.hidden_size, self.out_dims, bias=True) + if hparams['use_spk_id']: + self.spk_id_proj = Embedding(hparams['num_spk'], self.hidden_size) + if hparams['use_spk_embed']: + self.spk_embed_proj = nn.Linear(256, self.hidden_size, bias=True) + predictor_hidden = hparams['predictor_hidden'] if hparams['predictor_hidden'] > 0 else self.hidden_size + self.dur_predictor = DurationPredictor( + self.hidden_size, + n_chans=predictor_hidden, + n_layers=hparams['dur_predictor_layers'], + dropout_rate=hparams['predictor_dropout'], + kernel_size=hparams['dur_predictor_kernel']) + self.length_regulator = LengthRegulator() + if hparams['use_pitch_embed']: + self.pitch_embed = Embedding(300, self.hidden_size, 0) + self.pitch_predictor = PitchPredictor( + self.hidden_size, n_chans=predictor_hidden, + n_layers=5, dropout_rate=0.1, odim=2, + kernel_size=hparams['predictor_kernel']) + if hparams['dec_inp_add_noise']: + self.z_channels = hparams['z_channels'] + self.dec_inp_noise_proj = nn.Linear(self.hidden_size + self.z_channels, self.hidden_size) + + def forward(self, txt_tokens, mel2ph=None, spk_embed=None, spk_id=None, + f0=None, uv=None, infer=False, **kwargs): + ret = {} + src_nonpadding = (txt_tokens > 0).float()[:, :, None] + style_embed = self.forward_style_embed(spk_embed, spk_id) + + use_bert = hparams.get("use_bert") is True + if use_bert: + encoder_out = self.encoder(txt_tokens, bert_feats=kwargs['bert_feats'], ph2word=kwargs['ph2word'], + ret=ret) * src_nonpadding + style_embed + else: + encoder_out = self.encoder(txt_tokens) * src_nonpadding + style_embed + + # add dur + dur_inp = (encoder_out + style_embed) * src_nonpadding + mel2ph = self.forward_dur(dur_inp, mel2ph, txt_tokens, ret) + tgt_nonpadding = (mel2ph > 0).float()[:, :, None] + decoder_inp = expand_states(encoder_out, mel2ph) + + # add pitch embed + if hparams['use_pitch_embed']: + pitch_inp = (decoder_inp + style_embed) * tgt_nonpadding + decoder_inp = decoder_inp + self.forward_pitch(pitch_inp, f0, uv, mel2ph, ret, encoder_out) + + # decoder input + ret['decoder_inp'] = decoder_inp = (decoder_inp + style_embed) * tgt_nonpadding + if hparams['dec_inp_add_noise']: + B, T, _ = decoder_inp.shape + z = kwargs.get('adv_z', torch.randn([B, T, self.z_channels])).to(decoder_inp.device) + ret['adv_z'] = z + decoder_inp = torch.cat([decoder_inp, z], -1) + decoder_inp = self.dec_inp_noise_proj(decoder_inp) * tgt_nonpadding + ret['mel_out'] = self.forward_decoder(decoder_inp, tgt_nonpadding, ret, infer=infer, **kwargs) + return ret + + def forward_style_embed(self, spk_embed=None, spk_id=None): + # add spk embed + style_embed = 0 + if hparams['use_spk_embed']: + style_embed = style_embed + self.spk_embed_proj(spk_embed)[:, None, :] + if hparams['use_spk_id']: + style_embed = style_embed + self.spk_id_proj(spk_id)[:, None, :] + return style_embed + + def forward_dur(self, dur_input, mel2ph, txt_tokens, ret): + """ + + :param dur_input: [B, T_txt, H] + :param mel2ph: [B, T_mel] + :param txt_tokens: [B, T_txt] + :param ret: + :return: + """ + src_padding = txt_tokens == 0 + if hparams['predictor_grad'] != 1: + 
dur_input = dur_input.detach() + hparams['predictor_grad'] * (dur_input - dur_input.detach()) + dur = self.dur_predictor(dur_input, src_padding) + ret['dur'] = dur + if mel2ph is None: + mel2ph = self.length_regulator(dur, src_padding).detach() + ret['mel2ph'] = mel2ph = clip_mel2token_to_multiple(mel2ph, hparams['frames_multiple']) + return mel2ph + + def forward_pitch(self, decoder_inp, f0, uv, mel2ph, ret, encoder_out=None): + if hparams['pitch_type'] == 'frame': + pitch_pred_inp = decoder_inp + pitch_padding = mel2ph == 0 + else: + pitch_pred_inp = encoder_out + pitch_padding = encoder_out.abs().sum(-1) == 0 + uv = None + if hparams['predictor_grad'] != 1: + pitch_pred_inp = pitch_pred_inp.detach() + \ + hparams['predictor_grad'] * (pitch_pred_inp - pitch_pred_inp.detach()) + ret['pitch_pred'] = pitch_pred = self.pitch_predictor(pitch_pred_inp) + use_uv = hparams['pitch_type'] == 'frame' and hparams['use_uv'] + if f0 is None: + f0 = pitch_pred[:, :, 0] + if use_uv: + uv = pitch_pred[:, :, 1] > 0 + f0_denorm = denorm_f0(f0, uv if use_uv else None, pitch_padding=pitch_padding) + pitch = f0_to_coarse(f0_denorm) # start from 0 [B, T_txt] + ret['f0_denorm'] = f0_denorm + ret['f0_denorm_pred'] = denorm_f0( + pitch_pred[:, :, 0], (pitch_pred[:, :, 1] > 0) if use_uv else None, + pitch_padding=pitch_padding) + if hparams['pitch_type'] == 'ph': + pitch = torch.gather(F.pad(pitch, [1, 0]), 1, mel2ph) + ret['f0_denorm'] = torch.gather(F.pad(ret['f0_denorm'], [1, 0]), 1, mel2ph) + ret['f0_denorm_pred'] = torch.gather(F.pad(ret['f0_denorm_pred'], [1, 0]), 1, mel2ph) + pitch_embed = self.pitch_embed(pitch) + return pitch_embed + + def forward_decoder(self, decoder_inp, tgt_nonpadding, ret, infer, **kwargs): + x = decoder_inp # [B, T, H] + x = self.decoder(x) + x = self.mel_out(x) + return x * tgt_nonpadding diff --git a/NeuralSeq/modules/portaspeech/fvae.py b/NeuralSeq/modules/portaspeech/fvae.py new file mode 100644 index 0000000000000000000000000000000000000000..9659a4f2abb9fd2ef887a432b7ad55b0129a1c6f --- /dev/null +++ b/NeuralSeq/modules/portaspeech/fvae.py @@ -0,0 +1,202 @@ +import numpy as np +import torch +import torch.distributions as dist +from torch import nn + +from modules.commons.conv import ConditionalConvBlocks +from modules.commons.normalizing_flow.res_flow import ResFlow +from modules.commons.wavenet import WN + + +class FVAEEncoder(nn.Module): + def __init__(self, c_in, hidden_size, c_latent, kernel_size, + n_layers, c_cond=0, p_dropout=0, strides=[4], nn_type='wn'): + super().__init__() + self.strides = strides + self.hidden_size = hidden_size + if np.prod(strides) == 1: + self.pre_net = nn.Conv1d(c_in, hidden_size, kernel_size=1) + else: + self.pre_net = nn.Sequential(*[ + nn.Conv1d(c_in, hidden_size, kernel_size=s * 2, stride=s, padding=s // 2) + if i == 0 else + nn.Conv1d(hidden_size, hidden_size, kernel_size=s * 2, stride=s, padding=s // 2) + for i, s in enumerate(strides) + ]) + if nn_type == 'wn': + self.nn = WN(hidden_size, kernel_size, 1, n_layers, c_cond, p_dropout) + elif nn_type == 'conv': + self.nn = ConditionalConvBlocks( + hidden_size, c_cond, hidden_size, None, kernel_size, + layers_in_block=2, is_BTC=False, num_layers=n_layers) + + self.out_proj = nn.Conv1d(hidden_size, c_latent * 2, 1) + self.latent_channels = c_latent + + def forward(self, x, nonpadding, cond): + x = self.pre_net(x) + nonpadding = nonpadding[:, :, ::np.prod(self.strides)][:, :, :x.shape[-1]] + x = x * nonpadding + x = self.nn(x, nonpadding=nonpadding, cond=cond) * nonpadding + x = self.out_proj(x) 
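+        # out_proj emits 2 * c_latent channels: the first half is the posterior
+        # mean m, the second half the log-std logs. The next lines draw the
+        # reparameterized sample z = m + eps * exp(logs) with eps ~ N(0, I).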
+ m, logs = torch.split(x, self.latent_channels, dim=1) + z = (m + torch.randn_like(m) * torch.exp(logs)) + return z, m, logs, nonpadding + + +class FVAEDecoder(nn.Module): + def __init__(self, c_latent, hidden_size, out_channels, kernel_size, + n_layers, c_cond=0, p_dropout=0, strides=[4], nn_type='wn'): + super().__init__() + self.strides = strides + self.hidden_size = hidden_size + self.pre_net = nn.Sequential(*[ + nn.ConvTranspose1d(c_latent, hidden_size, kernel_size=s, stride=s) + if i == 0 else + nn.ConvTranspose1d(hidden_size, hidden_size, kernel_size=s, stride=s) + for i, s in enumerate(strides) + ]) + if nn_type == 'wn': + self.nn = WN(hidden_size, kernel_size, 1, n_layers, c_cond, p_dropout) + elif nn_type == 'conv': + self.nn = ConditionalConvBlocks( + hidden_size, c_cond, hidden_size, [1] * n_layers, kernel_size, + layers_in_block=2, is_BTC=False) + self.out_proj = nn.Conv1d(hidden_size, out_channels, 1) + + def forward(self, x, nonpadding, cond): + x = self.pre_net(x) + x = x * nonpadding + x = self.nn(x, nonpadding=nonpadding, cond=cond) * nonpadding + x = self.out_proj(x) + return x + + +class FVAE(nn.Module): + def __init__(self, + c_in_out, hidden_size, c_latent, + kernel_size, enc_n_layers, dec_n_layers, c_cond, strides, + use_prior_flow, flow_hidden=None, flow_kernel_size=None, flow_n_steps=None, + encoder_type='wn', decoder_type='wn'): + super(FVAE, self).__init__() + self.strides = strides + self.hidden_size = hidden_size + self.latent_size = c_latent + self.use_prior_flow = use_prior_flow + if np.prod(strides) == 1: + self.g_pre_net = nn.Conv1d(c_cond, c_cond, kernel_size=1) + else: + self.g_pre_net = nn.Sequential(*[ + nn.Conv1d(c_cond, c_cond, kernel_size=s * 2, stride=s, padding=s // 2) + for i, s in enumerate(strides) + ]) + self.encoder = FVAEEncoder(c_in_out, hidden_size, c_latent, kernel_size, + enc_n_layers, c_cond, strides=strides, nn_type=encoder_type) + if use_prior_flow: + self.prior_flow = ResFlow( + c_latent, flow_hidden, flow_kernel_size, flow_n_steps, 4, c_cond=c_cond) + self.decoder = FVAEDecoder(c_latent, hidden_size, c_in_out, kernel_size, + dec_n_layers, c_cond, strides=strides, nn_type=decoder_type) + self.prior_dist = dist.Normal(0, 1) + + def forward(self, x=None, nonpadding=None, cond=None, infer=False, noise_scale=1.0, **kwargs): + """ + + :param x: [B, C_in_out, T] + :param nonpadding: [B, 1, T] + :param cond: [B, C_g, T] + :return: + """ + if nonpadding is None: + nonpadding = 1 + cond_sqz = self.g_pre_net(cond) + if not infer: + z_q, m_q, logs_q, nonpadding_sqz = self.encoder(x, nonpadding, cond_sqz) + q_dist = dist.Normal(m_q, logs_q.exp()) + if self.use_prior_flow: + logqx = q_dist.log_prob(z_q) + z_p = self.prior_flow(z_q, nonpadding_sqz, cond_sqz) + logpx = self.prior_dist.log_prob(z_p) + loss_kl = ((logqx - logpx) * nonpadding_sqz).sum() / nonpadding_sqz.sum() / logqx.shape[1] + else: + loss_kl = torch.distributions.kl_divergence(q_dist, self.prior_dist) + loss_kl = (loss_kl * nonpadding_sqz).sum() / nonpadding_sqz.sum() / z_q.shape[1] + z_p = None + return z_q, loss_kl, z_p, m_q, logs_q + else: + latent_shape = [cond_sqz.shape[0], self.latent_size, cond_sqz.shape[2]] + z_p = torch.randn(latent_shape).to(cond.device) * noise_scale + if self.use_prior_flow: + z_p = self.prior_flow(z_p, 1, cond_sqz, reverse=True) + return z_p + + +class SyntaFVAE(nn.Module): + def __init__(self, + c_in_out, hidden_size, c_latent, + kernel_size, enc_n_layers, dec_n_layers, c_cond, strides, + use_prior_flow, flow_hidden=None, flow_kernel_size=None, 
flow_n_steps=None, + encoder_type='wn', decoder_type='wn'): + super(SyntaFVAE, self).__init__() + self.strides = strides + self.hidden_size = hidden_size + self.latent_size = c_latent + self.use_prior_flow = use_prior_flow + if np.prod(strides) == 1: + self.g_pre_net = nn.Conv1d(c_cond, c_cond, kernel_size=1) + else: + self.g_pre_net = nn.Sequential(*[ + nn.Conv1d(c_cond, c_cond, kernel_size=s * 2, stride=s, padding=s // 2) + for i, s in enumerate(strides) + ]) + self.encoder = FVAEEncoder(c_in_out, hidden_size, c_latent, kernel_size, + enc_n_layers, c_cond, strides=strides, nn_type=encoder_type) + if use_prior_flow: + self.prior_flow = ResFlow( + c_latent, flow_hidden, flow_kernel_size, flow_n_steps, 4, c_cond=c_cond) + self.decoder = FVAEDecoder(c_latent, hidden_size, c_in_out, kernel_size, + dec_n_layers, c_cond, strides=strides, nn_type=decoder_type) + self.prior_dist = dist.Normal(0, 1) + self.graph_encoder = GraphAuxEnc(in_dim=hidden_size, hid_dim=hidden_size,out_dim=hidden_size) + + def forward(self, x=None, nonpadding=None, cond=None, infer=False, noise_scale=1.0, + mel2word=None, ph2word=None, graph_lst=None, etypes_lst=None): + """ + + :param x: target mel, [B, C_in_out, T] + :param nonpadding: [B, 1, T] + :param cond: phoneme encoding, [B, C_g, T] + :return: + """ + word_len = ph2word.max(dim=1)[0] + ph_encoding_for_graph = cond.detach() + 0.1 * (cond - cond.detach()) # only 0.1x grad can pass through + _, ph_out_word_encoding_for_graph = GraphAuxEnc.ph_encoding_to_word_encoding(ph_encoding_for_graph.transpose(1,2), mel2word, word_len) + t_m = mel2word.shape[-1] + g_graph = self.graph_encoder.word_forward(graph_lst=graph_lst, word_encoding=ph_out_word_encoding_for_graph, etypes_lst=etypes_lst) + g_graph = g_graph.transpose(1,2) + g_graph = GraphAuxEnc._postprocess_word2ph(g_graph,mel2word,t_m) + g_graph = g_graph.transpose(1,2) + cond = cond + g_graph * 1. 
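+        # From here on the computation mirrors FVAE.forward: the graph-augmented
+        # condition drives the posterior encoder (training) or a prior sample,
+        # optionally transformed by the prior flow (inference).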
+ + if nonpadding is None: + nonpadding = 1 + cond_sqz = self.g_pre_net(cond) + if not infer: + z_q, m_q, logs_q, nonpadding_sqz = self.encoder(x, nonpadding, cond_sqz) + q_dist = dist.Normal(m_q, logs_q.exp()) + if self.use_prior_flow: + logqx = q_dist.log_prob(z_q) + z_p = self.prior_flow(z_q, nonpadding_sqz, cond_sqz) + logpx = self.prior_dist.log_prob(z_p) + loss_kl = ((logqx - logpx) * nonpadding_sqz).sum() / nonpadding_sqz.sum() / logqx.shape[1] + else: + loss_kl = torch.distributions.kl_divergence(q_dist, self.prior_dist) + loss_kl = (loss_kl * nonpadding_sqz).sum() / nonpadding_sqz.sum() / z_q.shape[1] + z_p = None + return z_q, loss_kl, z_p, m_q, logs_q + else: + latent_shape = [cond_sqz.shape[0], self.latent_size, cond_sqz.shape[2]] + z_p = torch.randn(latent_shape).to(cond.device) * noise_scale + if self.use_prior_flow: + z_p = self.prior_flow(z_p, 1, cond_sqz, reverse=True) + return z_p \ No newline at end of file diff --git a/NeuralSeq/modules/portaspeech/portaspeech.py b/NeuralSeq/modules/portaspeech/portaspeech.py new file mode 100644 index 0000000000000000000000000000000000000000..e313add6037ae87f41361633fcf6d92f39e8fd92 --- /dev/null +++ b/NeuralSeq/modules/portaspeech/portaspeech.py @@ -0,0 +1,230 @@ +import math +import torch +from torch import nn +from torch.nn import Linear + +from modules.commons.conv import ConvBlocks, ConditionalConvBlocks +from modules.commons.common_layers import Embedding +from modules.commons.rel_transformer import RelTransformerEncoder +from modules.commons.transformer import MultiheadAttention, FFTBlocks +from modules.commons.align_ops import clip_mel2token_to_multiple, build_word_mask, expand_states, mel2ph_to_mel2word +from modules.portaspeech.fs import FS_DECODERS, FastSpeech +from modules.portaspeech.fvae import FVAE +from utils.tts_utils import group_hidden_by_segs +from utils.hparams import hparams + +class SinusoidalPosEmb(nn.Module): + def __init__(self, dim): + super().__init__() + self.dim = dim + + def forward(self, x): + """ + + :param x: [B, T] + :return: [B, T, H] + """ + device = x.device + half_dim = self.dim // 2 + emb = math.log(10000) / (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, device=device) * -emb) + emb = x[:, :, None] * emb[None, :] + emb = torch.cat((emb.sin(), emb.cos()), dim=-1) + return emb + + +class PortaSpeech(FastSpeech): + def __init__(self, ph_dictionary, word_dictionary, out_dims=None): + super().__init__(ph_dictionary, out_dims) + # build linguistic encoder + if hparams['use_word_encoder']: + # default False, use independent word embedding instead of phoneme encoding to represent word + self.word_encoder = RelTransformerEncoder( + len(word_dictionary), self.hidden_size, self.hidden_size, self.hidden_size, 2, + hparams['word_enc_layers'], hparams['enc_ffn_kernel_size']) + if hparams['dur_level'] == 'word': + if hparams['word_encoder_type'] == 'rel_fft': + self.ph2word_encoder = RelTransformerEncoder( + 0, self.hidden_size, self.hidden_size, self.hidden_size, 2, + hparams['word_enc_layers'], hparams['enc_ffn_kernel_size']) + if hparams['word_encoder_type'] == 'fft': + self.ph2word_encoder = FFTBlocks( + self.hidden_size, hparams['word_enc_layers'], 1, num_heads=hparams['num_heads']) + self.sin_pos = SinusoidalPosEmb(self.hidden_size) + self.enc_pos_proj = nn.Linear(2 * self.hidden_size, self.hidden_size) + self.dec_query_proj = nn.Linear(2 * self.hidden_size, self.hidden_size) + self.dec_res_proj = nn.Linear(2 * self.hidden_size, self.hidden_size) + self.attn = MultiheadAttention(self.hidden_size, 
1, encoder_decoder_attention=True, bias=False) + self.attn.enable_torch_version = False + if hparams['text_encoder_postnet']: + self.text_encoder_postnet = ConvBlocks( + self.hidden_size, self.hidden_size, [1] * 3, 5, layers_in_block=2) + else: + self.sin_pos = SinusoidalPosEmb(self.hidden_size) + # build VAE decoder + if hparams['use_fvae']: + del self.decoder + del self.mel_out + self.fvae = FVAE( + c_in_out=self.out_dims, + hidden_size=hparams['fvae_enc_dec_hidden'], c_latent=hparams['latent_size'], + kernel_size=hparams['fvae_kernel_size'], + enc_n_layers=hparams['fvae_enc_n_layers'], + dec_n_layers=hparams['fvae_dec_n_layers'], + c_cond=self.hidden_size, + use_prior_flow=hparams['use_prior_flow'], + flow_hidden=hparams['prior_flow_hidden'], + flow_kernel_size=hparams['prior_flow_kernel_size'], + flow_n_steps=hparams['prior_flow_n_blocks'], + strides=[hparams['fvae_strides']], + encoder_type=hparams['fvae_encoder_type'], + decoder_type=hparams['fvae_decoder_type'], + ) + else: + self.decoder = FS_DECODERS[hparams['decoder_type']](hparams) + self.mel_out = Linear(self.hidden_size, self.out_dims, bias=True) + if hparams['use_pitch_embed']: + self.pitch_embed = Embedding(300, self.hidden_size, 0) + if hparams['add_word_pos']: + self.word_pos_proj = Linear(self.hidden_size, self.hidden_size) + + def build_embedding(self, dictionary, embed_dim): + num_embeddings = len(dictionary) + emb = Embedding(num_embeddings, embed_dim, self.padding_idx) + return emb + + def forward(self, txt_tokens, word_tokens, ph2word, word_len, mel2word=None, mel2ph=None, + spk_embed=None, spk_id=None, pitch=None, infer=False, tgt_mels=None, + global_step=None, *args, **kwargs): + ret = {} + style_embed = self.forward_style_embed(spk_embed, spk_id) + x, tgt_nonpadding = self.run_text_encoder( + txt_tokens, word_tokens, ph2word, word_len, mel2word, mel2ph, style_embed, ret, **kwargs) + x = x * tgt_nonpadding + ret['nonpadding'] = tgt_nonpadding + if hparams['use_pitch_embed']: + x = x + self.pitch_embed(pitch) + ret['decoder_inp'] = x + ret['mel_out_fvae'] = ret['mel_out'] = self.run_decoder(x, tgt_nonpadding, ret, infer, tgt_mels, global_step) + return ret + + def run_text_encoder(self, txt_tokens, word_tokens, ph2word, word_len, mel2word, mel2ph, style_embed, ret, **kwargs): + word2word = torch.arange(word_len)[None, :].to(ph2word.device) + 1 # [B, T_mel, T_word] + src_nonpadding = (txt_tokens > 0).float()[:, :, None] + use_bert = hparams.get("use_bert") is True + if use_bert: + ph_encoder_out = self.ph_encoder(txt_tokens, bert_feats=kwargs['bert_feats'], ph2word=ph2word, + graph_lst=kwargs['graph_lst'], etypes_lst=kwargs['etypes_lst'], + cl_feats=kwargs['cl_feats'], ret=ret) * src_nonpadding + style_embed + else: + ph_encoder_out = self.ph_encoder(txt_tokens) * src_nonpadding + style_embed + if hparams['use_word_encoder']: + word_encoder_out = self.word_encoder(word_tokens) + style_embed + ph_encoder_out = ph_encoder_out + expand_states(word_encoder_out, ph2word) + if hparams['dur_level'] == 'word': + word_encoder_out = 0 + h_ph_gb_word = group_hidden_by_segs(ph_encoder_out, ph2word, word_len)[0] + word_encoder_out = word_encoder_out + self.ph2word_encoder(h_ph_gb_word) + if hparams['use_word_encoder']: + word_encoder_out = word_encoder_out + self.word_encoder(word_tokens) + mel2word = self.forward_dur(ph_encoder_out, mel2word, ret, ph2word=ph2word, word_len=word_len) + mel2word = clip_mel2token_to_multiple(mel2word, hparams['frames_multiple']) + tgt_nonpadding = (mel2word > 0).float()[:, :, None] + enc_pos = 
self.get_pos_embed(word2word, ph2word) # [B, T_ph, H] + dec_pos = self.get_pos_embed(word2word, mel2word) # [B, T_mel, H] + dec_word_mask = build_word_mask(mel2word, ph2word) # [B, T_mel, T_ph] + x, weight = self.attention(ph_encoder_out, enc_pos, word_encoder_out, dec_pos, mel2word, dec_word_mask) + if hparams['add_word_pos']: + x = x + self.word_pos_proj(dec_pos) + ret['attn'] = weight + else: + mel2ph = self.forward_dur(ph_encoder_out, mel2ph, ret) + mel2ph = clip_mel2token_to_multiple(mel2ph, hparams['frames_multiple']) + mel2word = mel2ph_to_mel2word(mel2ph, ph2word) + x = expand_states(ph_encoder_out, mel2ph) + if hparams['add_word_pos']: + dec_pos = self.get_pos_embed(word2word, mel2word) # [B, T_mel, H] + x = x + self.word_pos_proj(dec_pos) + tgt_nonpadding = (mel2ph > 0).float()[:, :, None] + if hparams['use_word_encoder']: + x = x + expand_states(word_encoder_out, mel2word) + return x, tgt_nonpadding + + def attention(self, ph_encoder_out, enc_pos, word_encoder_out, dec_pos, mel2word, dec_word_mask): + ph_kv = self.enc_pos_proj(torch.cat([ph_encoder_out, enc_pos], -1)) + word_enc_out_expend = expand_states(word_encoder_out, mel2word) + word_enc_out_expend = torch.cat([word_enc_out_expend, dec_pos], -1) + if hparams['text_encoder_postnet']: + word_enc_out_expend = self.dec_res_proj(word_enc_out_expend) + word_enc_out_expend = self.text_encoder_postnet(word_enc_out_expend) + dec_q = x_res = word_enc_out_expend + else: + dec_q = self.dec_query_proj(word_enc_out_expend) + x_res = self.dec_res_proj(word_enc_out_expend) + ph_kv, dec_q = ph_kv.transpose(0, 1), dec_q.transpose(0, 1) + x, (weight, _) = self.attn(dec_q, ph_kv, ph_kv, attn_mask=(1 - dec_word_mask) * -1e9) + x = x.transpose(0, 1) + x = x + x_res + return x, weight + + def run_decoder(self, x, tgt_nonpadding, ret, infer, tgt_mels=None, global_step=0): + if not hparams['use_fvae']: + x = self.decoder(x) + x = self.mel_out(x) + ret['kl'] = 0 + return x * tgt_nonpadding + else: + decoder_inp = x + x = x.transpose(1, 2) # [B, H, T] + tgt_nonpadding_BHT = tgt_nonpadding.transpose(1, 2) # [B, H, T] + if infer: + z = self.fvae(cond=x, infer=True) + else: + tgt_mels = tgt_mels.transpose(1, 2) # [B, 80, T] + z, ret['kl'], ret['z_p'], ret['m_q'], ret['logs_q'] = self.fvae( + tgt_mels, tgt_nonpadding_BHT, cond=x) + if global_step < hparams['posterior_start_steps']: + z = torch.randn_like(z) + x_recon = self.fvae.decoder(z, nonpadding=tgt_nonpadding_BHT, cond=x).transpose(1, 2) + ret['pre_mel_out'] = x_recon + return x_recon + + def forward_dur(self, dur_input, mel2word, ret, **kwargs): + """ + + :param dur_input: [B, T_txt, H] + :param mel2ph: [B, T_mel] + :param txt_tokens: [B, T_txt] + :param ret: + :return: + """ + src_padding = dur_input.data.abs().sum(-1) == 0 + dur_input = dur_input.detach() + hparams['predictor_grad'] * (dur_input - dur_input.detach()) + dur = self.dur_predictor(dur_input, src_padding) + if hparams['dur_level'] == 'word': + word_len = kwargs['word_len'] + ph2word = kwargs['ph2word'] + B, T_ph = ph2word.shape + dur = torch.zeros([B, word_len.max() + 1]).to(ph2word.device).scatter_add(1, ph2word, dur) + dur = dur[:, 1:] + ret['dur'] = dur + if mel2word is None: + mel2word = self.length_regulator(dur).detach() + return mel2word + + def get_pos_embed(self, word2word, x2word): + x_pos = build_word_mask(word2word, x2word).float() # [B, T_word, T_ph] + x_pos = (x_pos.cumsum(-1) / x_pos.sum(-1).clamp(min=1)[..., None] * x_pos).sum(1) + x_pos = self.sin_pos(x_pos.float()) # [B, T_ph, H] + return x_pos + + def 
store_inverse_all(self): + def remove_weight_norm(m): + try: + if hasattr(m, 'store_inverse'): + m.store_inverse() + nn.utils.remove_weight_norm(m) + except ValueError: # this module didn't have weight norm + return + + self.apply(remove_weight_norm) diff --git a/NeuralSeq/modules/portaspeech/portaspeech_flow.py b/NeuralSeq/modules/portaspeech/portaspeech_flow.py new file mode 100644 index 0000000000000000000000000000000000000000..7d7c563365c142b43679848fead9a65ae2bb5746 --- /dev/null +++ b/NeuralSeq/modules/portaspeech/portaspeech_flow.py @@ -0,0 +1,75 @@ +import torch +import torch.distributions as dist +from torch import nn +from modules.commons.normalizing_flow.glow_modules import Glow +from modules.portaspeech.portaspeech import PortaSpeech +from utils.hparams import hparams + +class PortaSpeechFlow(PortaSpeech): + def __init__(self, ph_dict_size, word_dict_size, out_dims=None): + super().__init__(ph_dict_size, word_dict_size, out_dims) + cond_hs = 80 + if hparams.get('use_txt_cond', True): + cond_hs = cond_hs + hparams['hidden_size'] + if hparams.get('use_latent_cond', False): + cond_hs = cond_hs + hparams['latent_size'] + if hparams['use_cond_proj']: + self.g_proj = nn.Conv1d(cond_hs, 160, 5, padding=2) + cond_hs = 160 + self.post_flow = Glow( + 80, hparams['post_glow_hidden'], hparams['post_glow_kernel_size'], 1, + hparams['post_glow_n_blocks'], hparams['post_glow_n_block_layers'], + n_split=4, n_sqz=2, + gin_channels=cond_hs, + share_cond_layers=hparams['post_share_cond_layers'], + share_wn_layers=hparams['share_wn_layers'], + sigmoid_scale=hparams['sigmoid_scale'] + ) + self.prior_dist = dist.Normal(0, 1) + + def forward(self, txt_tokens, word_tokens, ph2word, word_len, mel2word=None, mel2ph=None, + spk_embed=None, spk_id=None, pitch=None, infer=False, tgt_mels=None, + forward_post_glow=True, two_stage=True, global_step=None, **kwargs): + is_training = self.training + train_fvae = not (forward_post_glow and two_stage) + if not train_fvae: + self.eval() + with torch.set_grad_enabled(mode=train_fvae): + ret = super(PortaSpeechFlow, self).forward( + txt_tokens, word_tokens, ph2word, word_len, mel2word, mel2ph, + spk_embed, spk_id, pitch, infer, tgt_mels, global_step, **kwargs) + if (forward_post_glow or not two_stage) and hparams['use_post_flow']: + self.run_post_glow(tgt_mels, infer, is_training, ret) + return ret + + def run_post_glow(self, tgt_mels, infer, is_training, ret): + x_recon = ret['mel_out'].transpose(1, 2) + g = x_recon + B, _, T = g.shape + if hparams.get('use_txt_cond', True): + g = torch.cat([g, ret['decoder_inp'].transpose(1, 2)], 1) + if hparams.get('use_latent_cond', False): + g_z = ret['z_p'][:, :, :, None].repeat(1, 1, 1, 4).reshape(B, -1, T) + g = torch.cat([g, g_z], 1) + if hparams['use_cond_proj']: + g = self.g_proj(g) + prior_dist = self.prior_dist + if not infer: + if is_training: + self.post_flow.train() + nonpadding = ret['nonpadding'].transpose(1, 2) + y_lengths = nonpadding.sum(-1) + if hparams['detach_postflow_input']: + g = g.detach() + tgt_mels = tgt_mels.transpose(1, 2) + z_postflow, ldj = self.post_flow(tgt_mels, nonpadding, g=g) + ldj = ldj / y_lengths / 80 + ret['z_pf'], ret['ldj_pf'] = z_postflow, ldj + ret['postflow'] = -prior_dist.log_prob(z_postflow).mean() - ldj.mean() + if torch.isnan(ret['postflow']): + ret['postflow'] = None + else: + nonpadding = torch.ones_like(x_recon[:, :1, :]) + z_post = torch.randn(x_recon.shape).to(g.device) * hparams['noise_scale'] + x_recon, _ = self.post_flow(z_post, nonpadding, g, reverse=True) + 
ret['mel_out'] = x_recon.transpose(1, 2) diff --git a/NeuralSeq/modules/syntaspeech/multi_window_disc.py b/NeuralSeq/modules/syntaspeech/multi_window_disc.py new file mode 100644 index 0000000000000000000000000000000000000000..a8166ac5b514e501043b9fed13aab01421a6c10e --- /dev/null +++ b/NeuralSeq/modules/syntaspeech/multi_window_disc.py @@ -0,0 +1,136 @@ +import numpy as np +import torch +import torch.nn as nn + + +class SingleWindowDisc(nn.Module): + def __init__(self, time_length, freq_length=80, kernel=(3, 3), c_in=1, hidden_size=128): + super().__init__() + padding = (kernel[0] // 2, kernel[1] // 2) + self.model = nn.ModuleList([ + nn.Sequential(*[ + nn.Conv2d(c_in, hidden_size, kernel, (2, 2), padding), + nn.LeakyReLU(0.2, inplace=True), + nn.Dropout2d(0.25), + nn.BatchNorm2d(hidden_size, 0.8) + ]), + nn.Sequential(*[ + nn.Conv2d(hidden_size, hidden_size, kernel, (2, 2), padding), + nn.LeakyReLU(0.2, inplace=True), + nn.Dropout2d(0.25), + nn.BatchNorm2d(hidden_size, 0.8) + ]), + nn.Sequential(*[ + nn.Conv2d(hidden_size, hidden_size, kernel, (2, 2), padding), + nn.LeakyReLU(0.2, inplace=True), + nn.Dropout2d(0.25), + ]), + ]) + ds_size = (time_length // 2 ** 3, (freq_length + 7) // 2 ** 3) + self.adv_layer = nn.Linear(hidden_size * ds_size[0] * ds_size[1], 1) + + def forward(self, x): + """ + :param x: [B, C, T, n_bins] + :return: validity: [B, 1], h: List of hiddens + """ + h = [] + for l in self.model: + x = l(x) + h.append(x) + x = x.view(x.shape[0], -1) + validity = self.adv_layer(x) # [B, 1] + return validity, h + + +class MultiWindowDiscriminator(nn.Module): + def __init__(self, time_lengths, freq_length=80, kernel=(3, 3), c_in=1, hidden_size=128): + super(MultiWindowDiscriminator, self).__init__() + self.win_lengths = time_lengths + self.discriminators = nn.ModuleList() + + for time_length in time_lengths: + self.discriminators += [SingleWindowDisc(time_length, freq_length, kernel, c_in=c_in, hidden_size=hidden_size)] + + def forward(self, x, x_len, start_frames_wins=None): + ''' + Args: + x (tensor): input mel, (B, c_in, T, n_bins). + x_length (tensor): len of per mel. (B,). + + Returns: + tensor : (B). + ''' + validity = [] + if start_frames_wins is None: + start_frames_wins = [None] * len(self.discriminators) + h = [] + for i, start_frames in zip(range(len(self.discriminators)), start_frames_wins): + x_clip, start_frames = self.clip(x, x_len, self.win_lengths[i], start_frames) # (B, win_length, C) + start_frames_wins[i] = start_frames + if x_clip is None: + continue + x_clip, h_ = self.discriminators[i](x_clip) + h += h_ + validity.append(x_clip) + if len(validity) != len(self.discriminators): + return None, start_frames_wins, h + validity = sum(validity) # [B] + return validity, start_frames_wins, h + + def clip(self, x, x_len, win_length, start_frames=None): + '''Ramdom clip x to win_length. + Args: + x (tensor) : (B, c_in, T, n_bins). + cond (tensor) : (B, T, H). + x_len (tensor) : (B,). + win_length (int): target clip length + + Returns: + (tensor) : (B, c_in, win_length, n_bins). 
+ + ''' + T_start = 0 + T_end = x_len.max() - win_length + if T_end < 0: + return None, None, start_frames + T_end = T_end.item() + if start_frames is None: + start_frame = np.random.randint(low=T_start, high=T_end + 1) + start_frames = [start_frame] * x.size(0) + else: + start_frame = start_frames[0] + x_batch = x[:, :, start_frame: start_frame + win_length] + return x_batch, start_frames + + +class Discriminator(nn.Module): + def __init__(self, time_lengths=[32, 64, 128], freq_length=80, kernel=(3, 3), c_in=1, + hidden_size=128): + super(Discriminator, self).__init__() + self.time_lengths = time_lengths + self.discriminator = MultiWindowDiscriminator( + freq_length=freq_length, + time_lengths=time_lengths, + kernel=kernel, + c_in=c_in, hidden_size=hidden_size + ) + + + def forward(self, x, start_frames_wins=None): + """ + + :param x: [B, T, 80] + :param return_y_only: + :return: + """ + if len(x.shape) == 3: + x = x[:, None, :, :] # [B,1,T,80] + x_len = x.sum([1, -1]).ne(0).int().sum([-1]) + ret = {'y_c': None, 'y': None} + ret['y'], start_frames_wins, ret['h'] = self.discriminator( + x, x_len, start_frames_wins=start_frames_wins) + + ret['start_frames_wins'] = start_frames_wins + return ret + diff --git a/NeuralSeq/modules/syntaspeech/syntactic_graph_buider.py b/NeuralSeq/modules/syntaspeech/syntactic_graph_buider.py new file mode 100644 index 0000000000000000000000000000000000000000..6334b6a0435390abb05f04638afc6e2a57a8f000 --- /dev/null +++ b/NeuralSeq/modules/syntaspeech/syntactic_graph_buider.py @@ -0,0 +1,294 @@ +from copy import deepcopy +import torch +import dgl +import stanza +import networkx as nx + +class Sentence2GraphParser: + def __init__(self, language='zh', use_gpu=False, download=False): + self.language = language + if download: + self.stanza_parser = stanza.Pipeline(lang=language, use_gpu=use_gpu) + else: + self.stanza_parser = stanza.Pipeline(lang=language, use_gpu=use_gpu, download_method=None) + + def parse(self, clean_sentence=None, words=None, ph_words=None): + if self.language == 'zh': + assert words is not None and ph_words is not None + ret = self._parse_zh(words, ph_words) + elif self.language == 'en': + assert clean_sentence is not None + ret = self._parse_en(clean_sentence) + else: + raise NotImplementedError + return ret + + def _parse_zh(self, words, ph_words, enable_backward_edge=True, enable_recur_edge=True, + enable_inter_sentence_edge=True, sequential_edge=False): + """ + words: , each character in chinese is one item + ph_words: , each character in chinese is one item, represented by the phoneme + Example: + text1 = '宝马配挂跛骡鞍,貂蝉怨枕董翁榻.' 
+ words = ['', '宝', '马', '配', '挂', '跛', '骡', '鞍', ',' + , '貂', '蝉', '怨', '枕', '董', '翁', '榻', ''] + ph_words = ['', 'b_ao3_|', 'm_a3_#', 'p_ei4_|', 'g_ua4_#', + 'b_o3_#', 'l_uo2_|', 'an1', ',', 'd_iao1_|', + 'ch_an2_#', 'van4_#', 'zh_en3_#', 'd_ong3_|', 'ueng1_#', 't_a4', ''] + """ + words, ph_words = words[1:-1], ph_words[1:-1] # delete and + for i, p_w in enumerate(ph_words): + if p_w == ',': + # change english ',' into chinese + # we found it necessary in stanza's dependency parsing + words[i], ph_words[i] = ',', ',' + tmp_words = deepcopy(words) + num_added_space = 0 + for i, p_w in enumerate(ph_words): + if p_w.endswith("#"): + # add a blank after the p_w with '#', to separate words + tmp_words.insert(num_added_space + i + 1, " ") + num_added_space += 1 + if p_w in [',', ',']: + # add one blank before and after ', ', respectively + tmp_words.insert(num_added_space + i + 1, " ") # insert behind ',' first + tmp_words.insert(num_added_space + i, " ") # insert before + num_added_space += 2 + clean_text = ''.join(tmp_words).strip() + parser_out = self.stanza_parser(clean_text) + + idx_to_word = {i + 1: w for i, w in enumerate(words)} + + vocab_nodes = {} + vocab_idx_offset = 0 + for sentence in parser_out.sentences: + num_nodes_in_current_sentence = 0 + for vocab_node in sentence.words: + num_nodes_in_current_sentence += 1 + vocab_idx = vocab_node.id + vocab_idx_offset + vocab_text = vocab_node.text.replace(" ", "") # delete blank in vocab + vocab_nodes[vocab_idx] = vocab_text + vocab_idx_offset += num_nodes_in_current_sentence + + # start vocab-to-word alignment + vocab_to_word = {} + current_word_idx = 1 + for vocab_i in vocab_nodes.keys(): + vocab_to_word[vocab_i] = [] + for w_in_vocab_i in vocab_nodes[vocab_i]: + if w_in_vocab_i != idx_to_word[current_word_idx]: + raise ValueError("Word Mismatch!") + vocab_to_word[vocab_i].append(current_word_idx) # add a path (vocab_node_idx, word_global_idx) + current_word_idx += 1 + + # then we compute the vocab-level edges + if len(parser_out.sentences) > 5: + print("Detect more than 5 input sentence! 
pls check whether the sentence is too long!") + vocab_level_source_id, vocab_level_dest_id = [], [] + vocab_level_edge_types = [] + sentences_heads = [] + vocab_id_offset = 0 + # get forward edges + for s in parser_out.sentences: + for w in s.words: + w_idx = w.id + vocab_id_offset # it starts from 1, just same as binarizer + w_dest_idx = w.head + vocab_id_offset + if w.head == 0: + sentences_heads.append(w_idx) + continue + vocab_level_source_id.append(w_idx) + vocab_level_dest_id.append(w_dest_idx) + vocab_id_offset += len(s.words) + vocab_level_edge_types += [0] * len(vocab_level_source_id) + num_vocab = vocab_id_offset + + # optional: get backward edges + if enable_backward_edge: + back_source, back_dest = deepcopy(vocab_level_dest_id), deepcopy(vocab_level_source_id) + vocab_level_source_id += back_source + vocab_level_dest_id += back_dest + vocab_level_edge_types += [1] * len(back_source) + + # optional: get inter-sentence edges if num_sentences > 1 + inter_sentence_source, inter_sentence_dest = [], [] + if enable_inter_sentence_edge and len(sentences_heads) > 1: + def get_full_graph_edges(nodes): + tmp_edges = [] + for i, node_i in enumerate(nodes): + for j, node_j in enumerate(nodes): + if i == j: + continue + tmp_edges.append((node_i, node_j)) + return tmp_edges + + tmp_edges = get_full_graph_edges(sentences_heads) + for (source, dest) in tmp_edges: + inter_sentence_source.append(source) + inter_sentence_dest.append(dest) + vocab_level_source_id += inter_sentence_source + vocab_level_dest_id += inter_sentence_dest + vocab_level_edge_types += [3] * len(inter_sentence_source) + + if sequential_edge: + seq_source, seq_dest = list(range(1, num_vocab)) + list(range(num_vocab, 0, -1)), \ + list(range(2, num_vocab + 1)) + list(range(num_vocab - 1, -1, -1)) + vocab_level_source_id += seq_source + vocab_level_dest_id += seq_dest + vocab_level_edge_types += [4] * (num_vocab - 1) + [5] * (num_vocab - 1) + + # Then, we use the vocab-level edges and the vocab-to-word path, to construct the word-level graph + num_word = len(words) + source_id, dest_id, edge_types = [], [], [] + for (vocab_start, vocab_end, vocab_edge_type) in zip(vocab_level_source_id, vocab_level_dest_id, + vocab_level_edge_types): + # connect the first word in the vocab + word_start = min(vocab_to_word[vocab_start]) + word_end = min(vocab_to_word[vocab_end]) + source_id.append(word_start) + dest_id.append(word_end) + edge_types.append(vocab_edge_type) + + # sequential connection in words + for word_indices_in_v in vocab_to_word.values(): + for i, word_idx in enumerate(word_indices_in_v): + if i + 1 < len(word_indices_in_v): + source_id.append(word_idx) + dest_id.append(word_idx + 1) + edge_types.append(4) + if i - 1 >= 0: + source_id.append(word_idx) + dest_id.append(word_idx - 1) + edge_types.append(5) + + # optional: get recurrent edges + if enable_recur_edge: + recur_source, recur_dest = list(range(1, num_word + 1)), list(range(1, num_word + 1)) + source_id += recur_source + dest_id += recur_dest + edge_types += [2] * len(recur_source) + + # add and + source_id += [0, num_word + 1, 1, num_word] + dest_id += [1, num_word, 0, num_word + 1] + edge_types += [4, 4, 5, 5] # 4 represents sequentially forward, 5 is sequential backward + + edges = (torch.LongTensor(source_id), torch.LongTensor(dest_id)) + dgl_graph = dgl.graph(edges) + assert dgl_graph.num_edges() == len(edge_types) + return dgl_graph, torch.LongTensor(edge_types) + + def _parse_en(self, clean_sentence, enable_backward_edge=True, enable_recur_edge=True, + 
enable_inter_sentence_edge=True, sequential_edge=False, consider_bos_for_index=True): + """ + clean_sentence: , each word or punctuation should be separated by one blank. + """ + edge_types = [] # required for gated graph neural network + clean_sentence = clean_sentence.strip() + if clean_sentence.endswith((" .", " ,", " ;", " :", " ?", " !")): + clean_sentence = clean_sentence[:-2] + if clean_sentence.startswith(". "): + clean_sentence = clean_sentence[2:] + parser_out = self.stanza_parser(clean_sentence) + if len(parser_out.sentences) > 5: + print("Detect more than 5 input sentence! pls check whether the sentence is too long!") + print(clean_sentence) + source_id, dest_id = [], [] + sentences_heads = [] + word_id_offset = 0 + # get forward edges + for s in parser_out.sentences: + for w in s.words: + w_idx = w.id + word_id_offset # it starts from 1, just same as binarizer + w_dest_idx = w.head + word_id_offset + if w.head == 0: + sentences_heads.append(w_idx) + continue + source_id.append(w_idx) + dest_id.append(w_dest_idx) + word_id_offset += len(s.words) + num_word = word_id_offset + edge_types += [0] * len(source_id) + + # optional: get backward edges + if enable_backward_edge: + back_source, back_dest = deepcopy(dest_id), deepcopy(source_id) + source_id += back_source + dest_id += back_dest + edge_types += [1] * len(back_source) + + # optional: get recurrent edges + if enable_recur_edge: + recur_source, recur_dest = list(range(1, num_word + 1)), list(range(1, num_word + 1)) + source_id += recur_source + dest_id += recur_dest + edge_types += [2] * len(recur_source) + + # optional: get inter-sentence edges if num_sentences > 1 + inter_sentence_source, inter_sentence_dest = [], [] + if enable_inter_sentence_edge and len(sentences_heads) > 1: + def get_full_graph_edges(nodes): + tmp_edges = [] + for i, node_i in enumerate(nodes): + for j, node_j in enumerate(nodes): + if i == j: + continue + tmp_edges.append((node_i, node_j)) + return tmp_edges + + tmp_edges = get_full_graph_edges(sentences_heads) + for (source, dest) in tmp_edges: + inter_sentence_source.append(source) + inter_sentence_dest.append(dest) + source_id += inter_sentence_source + dest_id += inter_sentence_dest + edge_types += [3] * len(inter_sentence_source) + + # add and + source_id += [0, num_word + 1, 1, num_word] + dest_id += [1, num_word, 0, num_word + 1] + edge_types += [4, 4, 5, 5] # 4 represents sequentially forward, 5 is sequential backward + + # optional: sequential edge + if sequential_edge: + seq_source, seq_dest = list(range(1, num_word)) + list(range(num_word, 0, -1)), \ + list(range(2, num_word + 1)) + list(range(num_word - 1, -1, -1)) + source_id += seq_source + dest_id += seq_dest + edge_types += [4] * (num_word - 1) + [5] * (num_word - 1) + if consider_bos_for_index: + edges = (torch.LongTensor(source_id), torch.LongTensor(dest_id)) + else: + edges = (torch.LongTensor(source_id) - 1, torch.LongTensor(dest_id) - 1) + dgl_graph = dgl.graph(edges) + assert dgl_graph.num_edges() == len(edge_types) + return dgl_graph, torch.LongTensor(edge_types) + + +def plot_dgl_sentence_graph(dgl_graph, labels): + """ + labels = {idx: word for idx,word in enumerate(sentence.split(" ")) } + """ + import matplotlib.pyplot as plt + nx_graph = dgl_graph.to_networkx() + pos = nx.random_layout(nx_graph) + nx.draw(nx_graph, pos, with_labels=False) + nx.draw_networkx_labels(nx_graph, pos, labels) + plt.show() + +if __name__ == '__main__': + + # Unit Test for Chinese Graph Builder + parser = Sentence2GraphParser("zh") + text1 = 
'宝马配挂跛骡鞍,貂蝉怨枕董翁榻.' + words = ['', '宝', '马', '配', '挂', '跛', '骡', '鞍', ',', '貂', '蝉', '怨', '枕', '董', '翁', '榻', ''] + ph_words = ['', 'b_ao3_|', 'm_a3_#', 'p_ei4_|', 'g_ua4_#', 'b_o3_#', 'l_uo2_|', 'an1', ',', 'd_iao1_|', + 'ch_an2_#', 'van4_#', 'zh_en3_#', 'd_ong3_|', 'ueng1_#', 't_a4', ''] + graph1, etypes1 = parser.parse(text1, words, ph_words) + plot_dgl_sentence_graph(graph1, {i: w for i, w in enumerate(ph_words)}) + + # Unit Test for English Graph Builder + parser = Sentence2GraphParser("en") + text2 = "I love you . You love me . Mixue ice-scream and tea ." + graph2, etypes2 = parser.parse(text2) + plot_dgl_sentence_graph(graph2, {i: w for i, w in enumerate((" " + text2 + " ").split(" "))}) + diff --git a/NeuralSeq/modules/syntaspeech/syntactic_graph_encoder.py b/NeuralSeq/modules/syntaspeech/syntactic_graph_encoder.py new file mode 100644 index 0000000000000000000000000000000000000000..d703ae2c986b231a92ce468500ceb927d2a6ce7c --- /dev/null +++ b/NeuralSeq/modules/syntaspeech/syntactic_graph_encoder.py @@ -0,0 +1,193 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +import dgl +from dgl.nn.pytorch import GatedGraphConv + +def sequence_mask(lengths, maxlen, dtype=torch.bool): + if maxlen is None: + maxlen = lengths.max() + mask = ~(torch.ones((len(lengths), maxlen)).to(lengths.device).cumsum(dim=1).t() > lengths).t() + mask.type(dtype) + return mask + + +def group_hidden_by_segs(h, seg_ids, max_len): + """ + :param h: [B, T, H] + :param seg_ids: [B, T] + :return: h_ph: [B, T_ph, H] + """ + B, T, H = h.shape + h_gby_segs = h.new_zeros([B, max_len + 1, H]).scatter_add_(1, seg_ids[:, :, None].repeat([1, 1, H]), h) + all_ones = h.new_ones(h.shape[:2]) + cnt_gby_segs = h.new_zeros([B, max_len + 1]).scatter_add_(1, seg_ids, all_ones).contiguous() + h_gby_segs = h_gby_segs[:, 1:] + cnt_gby_segs = cnt_gby_segs[:, 1:] + h_gby_segs = h_gby_segs / torch.clamp(cnt_gby_segs[:, :, None], min=1) + # assert h_gby_segs.shape[-1] == 192 + return h_gby_segs + +class GraphAuxEnc(nn.Module): + def __init__(self, in_dim, hid_dim, out_dim, n_iterations=5, n_edge_types=6): + super(GraphAuxEnc, self).__init__() + self.in_dim = in_dim + self.hid_dim = hid_dim + self.out_dim = out_dim + self.skip_connect = True + self.dropout_after_gae = False + + self.ggc_1 = GatedGraphConv(in_feats=in_dim, out_feats=hid_dim + , n_steps=n_iterations, n_etypes=n_edge_types) + self.ggc_2 = GatedGraphConv(in_feats=hid_dim, out_feats=out_dim + , n_steps=n_iterations, n_etypes=n_edge_types) + self.dropout = nn.Dropout(p=0.5) + + @staticmethod + def ph_encoding_to_word_encoding(ph_encoding, ph2word, word_len): + """ + ph_encoding: [batch, t_p, hid] + ph2word: tensor [batch, t_w] + word_len: tensor [batch] + """ + word_encoding_for_graph, batch_word_encoding, has_word_row_idx = GraphAuxEnc._process_ph_to_word_encoding( + ph_encoding, + ph2word, + word_len) + # [batch, t_w, hid] + return batch_word_encoding, word_encoding_for_graph + + def pad_word_encoding_to_phoneme(self, word_encoding, ph2word, t_p): + return self._postprocess_word2ph(word_encoding, ph2word, t_p) + + @staticmethod + def _process_ph_to_word_encoding(ph_encoding, ph2word, word_len=None): + """ + ph_encoding: [batch, t_p, hid] + ph2word: tensor [batch, t_w] + word_len: tensor [batch] + """ + word_len = word_len.reshape([-1,]) + max_len = max(word_len) + num_nodes = sum(word_len) + + batch_word_encoding = group_hidden_by_segs(ph_encoding, ph2word, max_len) + bs, t_p, hid = batch_word_encoding.shape + has_word_mask = sequence_mask(word_len, 
max_len) # [batch, t_p, 1] + word_encoding = batch_word_encoding.reshape([bs * t_p, hid]) + has_word_row_idx = has_word_mask.reshape([-1]) + word_encoding = word_encoding[has_word_row_idx] + assert word_encoding.shape[0] == num_nodes + return word_encoding, batch_word_encoding, has_word_row_idx + + @staticmethod + def _postprocess_word2ph(word_encoding, ph2word, t_p): + word_encoding = F.pad(word_encoding,[0,0,1,0]) + ph2word_ = ph2word[:, :, None].repeat([1, 1, word_encoding.shape[-1]]) + out = torch.gather(word_encoding, 1, ph2word_) # [B, T, H] + return out + + @staticmethod + def _repeat_one_sequence(x, d, T): + """Repeat each frame according to duration.""" + if d.sum() == 0: + d = d.fill_(1) + hid = x.shape[-1] + expanded_lst = [x_.repeat(int(d_), 1) for x_, d_ in zip(x, d) if d_ != 0] + expanded = torch.cat(expanded_lst, dim=0) + if T > expanded.shape[0]: + expanded = torch.cat([expanded, torch.zeros([T - expanded.shape[0], hid]).to(expanded.device)], dim=0) + return expanded + + def word_forward(self, graph_lst, word_encoding, etypes_lst): + """ + word encoding in, word encoding out. + """ + batched_graph = dgl.batch(graph_lst) + inp = word_encoding + batched_etypes = torch.cat(etypes_lst) # [num_edges_in_batch, 1] + assert batched_graph.num_nodes() == inp.shape[0] + + gcc1_out = self.ggc_1(batched_graph, inp, batched_etypes) + if self.dropout_after_gae: + gcc1_out = self.dropout(gcc1_out) + gcc2_out = self.ggc_2(batched_graph, gcc1_out, batched_etypes) # [num_nodes_in_batch, hin] + if self.dropout_after_gae: + gcc2_out = self.ggc_2(batched_graph, gcc2_out, batched_etypes) + if self.skip_connect: + assert self.in_dim == self.hid_dim and self.hid_dim == self.out_dim + gcc2_out = inp + gcc1_out + gcc2_out + + word_len = torch.tensor([g.num_nodes() for g in graph_lst]).reshape([-1]) + max_len = max(word_len) + has_word_mask = sequence_mask(word_len, max_len) # [batch, t_p, 1] + has_word_row_idx = has_word_mask.reshape([-1]) + bs = len(graph_lst) + t_w = max([g.num_nodes() for g in graph_lst]) + hid = word_encoding.shape[-1] + output = torch.zeros([bs * t_w, hid]).to(gcc2_out.device) + output[has_word_row_idx] = gcc2_out + output = output.reshape([bs, t_w, hid]) + word_level_output = output + return torch.transpose(word_level_output, 1, 2) + + def forward(self, graph_lst, ph_encoding, ph2word, etypes_lst, return_word_encoding=False): + """ + graph_lst: [list of dgl_graph] + ph_encoding: [batch, hid, t_p] + ph2word: [list of list[1,2,2,2,3,3,3]] + etypes_lst: [list of etypes]; etypes: torch.LongTensor + """ + t_p = ph_encoding.shape[-1] + ph_encoding = ph_encoding.transpose(1,2) # [batch, t_p, hid] + word_len = torch.tensor([g.num_nodes() for g in graph_lst]).reshape([-1]) + batched_graph = dgl.batch(graph_lst) + inp, batched_word_encoding, has_word_row_idx = self._process_ph_to_word_encoding(ph_encoding, ph2word, + word_len=word_len) # [num_nodes_in_batch, in_dim] + bs, t_w, hid = batched_word_encoding.shape + batched_etypes = torch.cat(etypes_lst) # [num_edges_in_batch, 1] + gcc1_out = self.ggc_1(batched_graph, inp, batched_etypes) + gcc2_out = self.ggc_2(batched_graph, gcc1_out, batched_etypes) # [num_nodes_in_batch, hin] + # skip connection + gcc2_out = inp + gcc1_out + gcc2_out # [n_nodes, hid] + + output = torch.zeros([bs * t_w, hid]).to(gcc2_out.device) + output[has_word_row_idx] = gcc2_out + output = output.reshape([bs, t_w, hid]) + word_level_output = output + output = self._postprocess_word2ph(word_level_output, ph2word, t_p) # [batch, t_p, hid] + output = 
torch.transpose(output, 1, 2) + + if return_word_encoding: + return output, torch.transpose(word_level_output, 1, 2) + else: + return output + +if __name__ == '__main__': + # Unit Test for batching graphs + from modules.syntaspeech.syntactic_graph_buider import Sentence2GraphParser, plot_dgl_sentence_graph + parser = Sentence2GraphParser("en") + + # Unit Test for English Graph Builder + text1 = "To be or not to be , that 's a question ." + text2 = "I love you . You love me . Mixue ice-scream and tea ." + graph1, etypes1 = parser.parse(text1) + graph2, etypes2 = parser.parse(text2) + batched_text = " " + text1 + " " + " " + " " + text2 + " " + batched_nodes = [graph1.num_nodes(), graph2.num_nodes()] + plot_dgl_sentence_graph(dgl.batch([graph1, graph2]), {i: w for i, w in enumerate(batched_text.split(" "))}) + etypes_lst = [etypes1, etypes2] + + # Unit Test for Graph Encoder forward + in_feats = 4 + out_feats = 4 + enc = GraphAuxEnc(in_dim=in_feats, hid_dim=in_feats, out_dim=out_feats) + ph2word = torch.tensor([ + [1, 2, 3, 3, 3, 4, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 0], + [1, 2, 3, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] + ]) + inp = torch.randn([2, in_feats, 17]) # [N_sentence, feat, ph_length] + graph_lst = [graph1, graph2] + out = enc(graph_lst, inp, ph2word, etypes_lst) + print(out.shape) # [N_sentence, feat, ph_length] diff --git a/NeuralSeq/modules/syntaspeech/syntaspeech.py b/NeuralSeq/modules/syntaspeech/syntaspeech.py new file mode 100644 index 0000000000000000000000000000000000000000..c74b6a53765026919ebd3e583861051441c508da --- /dev/null +++ b/NeuralSeq/modules/syntaspeech/syntaspeech.py @@ -0,0 +1,274 @@ +import math +import torch +from torch import nn +from torch.nn import Linear +from utils.hparams import hparams +from modules.commons.conv import ConvBlocks, ConditionalConvBlocks +from modules.commons.common_layers import Embedding +from modules.commons.rel_transformer import RelTransformerEncoder +from modules.commons.transformer import MultiheadAttention, FFTBlocks +from modules.commons.align_ops import clip_mel2token_to_multiple, build_word_mask, expand_states, mel2ph_to_mel2word +from modules.tts.fastspeech import FS_DECODERS, FastSpeech +from modules.portaspeech.fvae import SyntaFVAE, FVAE +from utils.nn.seq_utils import group_hidden_by_segs +from modules.fastspeech.tts_modules import SyntaDurationPredictor + + +class SinusoidalPosEmb(nn.Module): + def __init__(self, dim): + super().__init__() + self.dim = dim + def forward(self, x): + """ + + :param x: [B, T] + :return: [B, T, H] + """ + device = x.device + half_dim = self.dim // 2 + emb = math.log(10000) / (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, device=device) * -emb) + emb = x[:, :, None] * emb[None, :] + emb = torch.cat((emb.sin(), emb.cos()), dim=-1) + return emb + + +class SyntaSpeech(FastSpeech): + def __init__(self, ph_dict_size, word_dict_size, out_dims=None): + super().__init__(ph_dict_size, out_dims) + # build linguistic encoder + if hparams['num_spk'] > 1: + self.spk_embed_proj = Embedding(hparams['num_spk'], self.hidden_size) + if hparams['use_word_encoder']: + self.word_encoder = RelTransformerEncoder( + word_dict_size, self.hidden_size, self.hidden_size, self.hidden_size, 2, + hparams['word_enc_layers'], hparams['enc_ffn_kernel_size']) + if hparams['dur_level'] == 'word': + if hparams['word_encoder_type'] == 'rel_fft': + self.ph2word_encoder = RelTransformerEncoder( + 0, self.hidden_size, self.hidden_size, self.hidden_size, 2, + hparams['word_enc_layers'], 
hparams['enc_ffn_kernel_size']) + if hparams['word_encoder_type'] == 'fft': + self.ph2word_encoder = FFTBlocks( + self.hidden_size, hparams['word_enc_layers'], 1, num_heads=hparams['num_heads']) + self.sin_pos = SinusoidalPosEmb(self.hidden_size) + self.enc_pos_proj = nn.Linear(2 * self.hidden_size, self.hidden_size) + self.dec_query_proj = nn.Linear(2 * self.hidden_size, self.hidden_size) + self.dec_res_proj = nn.Linear(2 * self.hidden_size, self.hidden_size) + self.attn = MultiheadAttention(self.hidden_size, 1, encoder_decoder_attention=True, bias=False) + self.attn.enable_torch_version = False + if hparams['text_encoder_postnet']: + self.text_encoder_postnet = ConvBlocks( + self.hidden_size, self.hidden_size, [1] * 3, 5, layers_in_block=2) + else: + self.sin_pos = SinusoidalPosEmb(self.hidden_size) + + predictor_hidden = hparams['predictor_hidden'] if hparams['predictor_hidden'] > 0 else self.hidden_size + self.dur_predictor = SyntaDurationPredictor( + self.hidden_size, + n_chans=predictor_hidden, + n_layers=hparams['dur_predictor_layers'], + dropout_rate=hparams['predictor_dropout'], + kernel_size=hparams['dur_predictor_kernel']) + # build VAE decoder + if hparams['use_fvae']: + del self.decoder + del self.mel_out + if hparams.get("use_gae_in_prior", True): + self.fvae = SyntaFVAE( + c_in_out=self.out_dims, + hidden_size=hparams['fvae_enc_dec_hidden'], c_latent=hparams['latent_size'], + kernel_size=hparams['fvae_kernel_size'], + enc_n_layers=hparams['fvae_enc_n_layers'], + dec_n_layers=hparams['fvae_dec_n_layers'], + c_cond=self.hidden_size, + use_prior_flow=hparams['use_prior_flow'], + flow_hidden=hparams['prior_flow_hidden'], + flow_kernel_size=hparams['prior_flow_kernel_size'], + flow_n_steps=hparams['prior_flow_n_blocks'], + strides=[hparams['fvae_strides']], + encoder_type=hparams['fvae_encoder_type'], + decoder_type=hparams['fvae_decoder_type'], + ) + else: + self.fvae = FVAE( + c_in_out=self.out_dims, + hidden_size=hparams['fvae_enc_dec_hidden'], c_latent=hparams['latent_size'], + kernel_size=hparams['fvae_kernel_size'], + enc_n_layers=hparams['fvae_enc_n_layers'], + dec_n_layers=hparams['fvae_dec_n_layers'], + c_cond=self.hidden_size, + use_prior_flow=hparams['use_prior_flow'], + flow_hidden=hparams['prior_flow_hidden'], + flow_kernel_size=hparams['prior_flow_kernel_size'], + flow_n_steps=hparams['prior_flow_n_blocks'], + strides=[hparams['fvae_strides']], + encoder_type=hparams['fvae_encoder_type'], + decoder_type=hparams['fvae_decoder_type'], + ) + else: + self.decoder = FS_DECODERS[hparams['decoder_type']](hparams) + self.mel_out = Linear(self.hidden_size, self.out_dims, bias=True) + if hparams['use_pitch_embed']: + self.pitch_embed = Embedding(300, self.hidden_size, 0) + if hparams['add_word_pos']: + self.word_pos_proj = Linear(self.hidden_size, self.hidden_size) + + def build_embedding(self, dictionary, embed_dim): + num_embeddings = len(dictionary) + emb = Embedding(num_embeddings, embed_dim, self.padding_idx) + return emb + + def forward(self, txt_tokens, word_tokens, ph2word, word_len, mel2word=None, mel2ph=None, + spk_embed=None, spk_id=None, pitch=None, infer=False, tgt_mels=None, + global_step=None, graph_lst=None, etypes_lst=None, *args, **kwargs): + + if hparams['use_spk_embed']: + spk_embed = spk_embed + elif hparams['use_spk_id']: + spk_embed = self.spk_embed_proj(spk_id)[:, None, :] + else: + spk_embed = 0 + + ret = {} + style_embed = self.forward_style_embed(spk_embed, spk_id) # speaker embedding, [B, 1, C] + x, tgt_nonpadding = self.run_text_encoder( + 
txt_tokens, word_tokens, ph2word, word_len, mel2word, mel2ph, style_embed, ret, graph_lst=graph_lst, etypes_lst=etypes_lst, **kwargs) + x = x + style_embed # it maybe necessary to achieve multi-speaker + x = x * tgt_nonpadding + ret['nonpadding'] = tgt_nonpadding + if hparams['use_pitch_embed']: + x = x + self.pitch_embed(pitch) + ret['decoder_inp'] = x + if infer and (mel2ph is None or mel2word is None): + mel2word = ret['mel2word'] + ret['mel_out_fvae'] = ret['mel_out'] = self.run_decoder(x, tgt_nonpadding, ret, infer, tgt_mels, global_step, + mel2word=mel2word, ph2word=ph2word, graph_lst=graph_lst, etypes_lst=etypes_lst) + return ret + + def run_text_encoder(self, txt_tokens, word_tokens, ph2word, word_len, mel2word, mel2ph, style_embed, ret, graph_lst, etypes_lst, **kwargs): + word2word = torch.arange(word_len)[None, :].to(ph2word.device) + 1 # [B, T_mel, T_word] + src_nonpadding = (txt_tokens > 0).float()[:, :, None] + use_bert = hparams.get("use_bert") is True + if use_bert: + ph_encoder_out = self.encoder(txt_tokens, bert_feats=kwargs['bert_feats'], ph2word=ph2word, + graph_lst=graph_lst, etypes_lst=etypes_lst, + cl_feats=kwargs['cl_feats'], ret=ret) * src_nonpadding + style_embed + else: + ph_encoder_out = self.encoder(txt_tokens) * src_nonpadding + style_embed + if hparams['use_word_encoder']: + word_encoder_out = self.word_encoder(word_tokens) + style_embed + ph_encoder_out = ph_encoder_out + expand_states(word_encoder_out, ph2word) + + dur_input = ph_encoder_out * src_nonpadding + if hparams['dur_level'] == 'word': + word_encoder_out = 0 + h_ph_gb_word = group_hidden_by_segs(ph_encoder_out, ph2word, word_len)[0] + word_encoder_out = word_encoder_out + self.ph2word_encoder(h_ph_gb_word) + if hparams['use_word_encoder']: + word_encoder_out = word_encoder_out + self.word_encoder(word_tokens) + mel2word = self.forward_dur(dur_input, mel2word, ret, ph2word=ph2word, word_len=word_len, graph_lst=graph_lst, etypes_lst=etypes_lst) + mel2word = clip_mel2token_to_multiple(mel2word, hparams['frames_multiple']) + ret['mel2word'] = mel2word + tgt_nonpadding = (mel2word > 0).float()[:, :, None] + enc_pos = self.get_pos_embed(word2word, ph2word) # [B, T_ph, H] + dec_pos = self.get_pos_embed(word2word, mel2word) # [B, T_mel, H] + dec_word_mask = build_word_mask(mel2word, ph2word) # [B, T_mel, T_ph] + x, weight = self.attention(ph_encoder_out, enc_pos, word_encoder_out, dec_pos, mel2word, dec_word_mask) + if hparams['add_word_pos']: + x = x + self.word_pos_proj(dec_pos) + ret['attn'] = weight + else: + mel2ph = self.forward_dur(dur_input, mel2ph, ret) + mel2ph = clip_mel2token_to_multiple(mel2ph, hparams['frames_multiple']) + mel2word = mel2ph_to_mel2word(mel2ph, ph2word) + x = expand_states(ph_encoder_out, mel2ph) + if hparams['add_word_pos']: + dec_pos = self.get_pos_embed(word2word, mel2word) # [B, T_mel, H] + x = x + self.word_pos_proj(dec_pos) + tgt_nonpadding = (mel2ph > 0).float()[:, :, None] + if hparams['use_word_encoder']: + x = x + expand_states(word_encoder_out, mel2word) + return x, tgt_nonpadding + + def attention(self, ph_encoder_out, enc_pos, word_encoder_out, dec_pos, mel2word, dec_word_mask): + ph_kv = self.enc_pos_proj(torch.cat([ph_encoder_out, enc_pos], -1)) + word_enc_out_expend = expand_states(word_encoder_out, mel2word) + word_enc_out_expend = torch.cat([word_enc_out_expend, dec_pos], -1) + if hparams['text_encoder_postnet']: + word_enc_out_expend = self.dec_res_proj(word_enc_out_expend) + word_enc_out_expend = self.text_encoder_postnet(word_enc_out_expend) + dec_q = x_res 
= word_enc_out_expend + else: + dec_q = self.dec_query_proj(word_enc_out_expend) + x_res = self.dec_res_proj(word_enc_out_expend) + ph_kv, dec_q = ph_kv.transpose(0, 1), dec_q.transpose(0, 1) + x, (weight, _) = self.attn(dec_q, ph_kv, ph_kv, attn_mask=(1 - dec_word_mask) * -1e9) + x = x.transpose(0, 1) + x = x + x_res + return x, weight + + def run_decoder(self, x, tgt_nonpadding, ret, infer, tgt_mels=None, global_step=0, + mel2word=None, ph2word=None, graph_lst=None, etypes_lst=None): + if not hparams['use_fvae']: + x = self.decoder(x) + x = self.mel_out(x) + ret['kl'] = 0 + return x * tgt_nonpadding + else: + # x is the phoneme encoding + x = x.transpose(1, 2) # [B, H, T] + tgt_nonpadding_BHT = tgt_nonpadding.transpose(1, 2) # [B, H, T] + if infer: + z = self.fvae(cond=x, infer=True, mel2word=mel2word, ph2word=ph2word, graph_lst=graph_lst, etypes_lst=etypes_lst) + else: + tgt_mels = tgt_mels.transpose(1, 2) # [B, 80, T] + z, ret['kl'], ret['z_p'], ret['m_q'], ret['logs_q'] = self.fvae( + tgt_mels, tgt_nonpadding_BHT, cond=x, mel2word=mel2word, ph2word=ph2word, graph_lst=graph_lst, etypes_lst=etypes_lst) + if global_step < hparams['posterior_start_steps']: + z = torch.randn_like(z) + x_recon = self.fvae.decoder(z, nonpadding=tgt_nonpadding_BHT, cond=x).transpose(1, 2) + ret['pre_mel_out'] = x_recon + return x_recon + + def forward_dur(self, dur_input, mel2word, ret, **kwargs): + """ + + :param dur_input: [B, T_txt, H] + :param mel2ph: [B, T_mel] + :param txt_tokens: [B, T_txt] + :param ret: + :return: + """ + word_len = kwargs['word_len'] + ph2word = kwargs['ph2word'] + graph_lst = kwargs['graph_lst'] + etypes_lst = kwargs['etypes_lst'] + src_padding = dur_input.data.abs().sum(-1) == 0 + dur_input = dur_input.detach() + hparams['predictor_grad'] * (dur_input - dur_input.detach()) + dur = self.dur_predictor(dur_input, src_padding, ph2word, graph_lst, etypes_lst) + + B, T_ph = ph2word.shape + dur = torch.zeros([B, word_len.max() + 1]).to(ph2word.device).scatter_add(1, ph2word, dur) + dur = dur[:, 1:] + ret['dur'] = dur + if mel2word is None: + mel2word = self.length_regulator(dur).detach() + return mel2word + + def get_pos_embed(self, word2word, x2word): + x_pos = build_word_mask(word2word, x2word).float() # [B, T_word, T_ph] + x_pos = (x_pos.cumsum(-1) / x_pos.sum(-1).clamp(min=1)[..., None] * x_pos).sum(1) + x_pos = self.sin_pos(x_pos.float()) # [B, T_ph, H] + return x_pos + + def store_inverse_all(self): + def remove_weight_norm(m): + try: + if hasattr(m, 'store_inverse'): + m.store_inverse() + nn.utils.remove_weight_norm(m) + except ValueError: # this module didn't have weight norm + return + + self.apply(remove_weight_norm) diff --git a/NeuralSeq/tasks/__pycache__/base_task.cpython-37.pyc b/NeuralSeq/tasks/__pycache__/base_task.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c38ad7f625a014f758733f260eca687c19a59c1a Binary files /dev/null and b/NeuralSeq/tasks/__pycache__/base_task.cpython-37.pyc differ diff --git a/NeuralSeq/tasks/__pycache__/base_task.cpython-38.pyc b/NeuralSeq/tasks/__pycache__/base_task.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3c4bee9e9a48e97b59101c1802551344d20988ca Binary files /dev/null and b/NeuralSeq/tasks/__pycache__/base_task.cpython-38.pyc differ diff --git a/NeuralSeq/tasks/base_task.py b/NeuralSeq/tasks/base_task.py new file mode 100644 index 0000000000000000000000000000000000000000..b74d25c85ce8a86865c5d5a09f3f92579ffb2074 --- /dev/null +++ b/NeuralSeq/tasks/base_task.py @@ 
-0,0 +1,360 @@ +import glob +import re +import subprocess +from datetime import datetime + +import matplotlib + +matplotlib.use('Agg') + +from utils.hparams import hparams, set_hparams +import random +import sys +import numpy as np +import torch.distributed as dist +from pytorch_lightning.loggers import TensorBoardLogger +from utils.pl_utils import LatestModelCheckpoint, BaseTrainer, data_loader, DDP +from torch import nn +import torch.utils.data +import utils +import logging +import os + +torch.multiprocessing.set_sharing_strategy(os.getenv('TORCH_SHARE_STRATEGY', 'file_system')) + +log_format = '%(asctime)s %(message)s' +logging.basicConfig(stream=sys.stdout, level=logging.INFO, + format=log_format, datefmt='%m/%d %I:%M:%S %p') + + +class BaseDataset(torch.utils.data.Dataset): + def __init__(self, shuffle): + super().__init__() + self.hparams = hparams + self.shuffle = shuffle + self.sort_by_len = hparams['sort_by_len'] + self.sizes = None + + @property + def _sizes(self): + return self.sizes + + def __getitem__(self, index): + raise NotImplementedError + + def collater(self, samples): + raise NotImplementedError + + def __len__(self): + return len(self._sizes) + + def num_tokens(self, index): + return self.size(index) + + def size(self, index): + """Return an example's size as a float or tuple. This value is used when + filtering a dataset with ``--max-positions``.""" + size = min(self._sizes[index], hparams['max_frames']) + return size + + def ordered_indices(self): + """Return an ordered list of indices. Batches will be constructed based + on this order.""" + if self.shuffle: + indices = np.random.permutation(len(self)) + if self.sort_by_len: + indices = indices[np.argsort(np.array(self._sizes)[indices], kind='mergesort')] + # shuffle first, then stable-sort by length, so items of equal length keep the order given by the random permutation (i.e. they stay randomly shuffled within each length group).
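 + # e.g. sizes [10, 20, 10, 20] with random permutation [2, 3, 0, 1] stable-sorts to [2, 0, 3, 1]:
 + # batches end up grouped by length, while equal-length items keep the permutation's random order.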
+ else: + indices = np.arange(len(self)) + return indices + + @property + def num_workers(self): + return int(os.getenv('NUM_WORKERS', hparams['ds_workers'])) + + +class BaseTask(nn.Module): + def __init__(self, *args, **kwargs): + # dataset configs + super(BaseTask, self).__init__(*args, **kwargs) + self.current_epoch = 0 + self.global_step = 0 + self.loaded_optimizer_states_dict = {} + self.trainer = None + self.logger = None + self.on_gpu = False + self.use_dp = False + self.use_ddp = False + self.example_input_array = None + + self.max_tokens = hparams['max_tokens'] + self.max_sentences = hparams['max_sentences'] + self.max_eval_tokens = hparams['max_eval_tokens'] + if self.max_eval_tokens == -1: + hparams['max_eval_tokens'] = self.max_eval_tokens = self.max_tokens + self.max_eval_sentences = hparams['max_eval_sentences'] + if self.max_eval_sentences == -1: + hparams['max_eval_sentences'] = self.max_eval_sentences = self.max_sentences + + self.model = None + self.training_losses_meter = None + + ########### + # Training, validation and testing + ########### + def build_model(self): + raise NotImplementedError + + def load_ckpt(self, ckpt_base_dir, current_model_name=None, model_name='model', force=True, strict=True): + # This function is updated on 2021.12.13 + if current_model_name is None: + current_model_name = model_name + utils.load_ckpt(self.__getattr__(current_model_name), ckpt_base_dir, current_model_name, force, strict) + + def on_epoch_start(self): + self.training_losses_meter = {'total_loss': utils.AvgrageMeter()} + + def _training_step(self, sample, batch_idx, optimizer_idx): + """ + + :param sample: + :param batch_idx: + :return: total loss: torch.Tensor, loss_log: dict + """ + raise NotImplementedError + + def training_step(self, sample, batch_idx, optimizer_idx=-1): + loss_ret = self._training_step(sample, batch_idx, optimizer_idx) + self.opt_idx = optimizer_idx + if loss_ret is None: + return {'loss': None} + total_loss, log_outputs = loss_ret + log_outputs = utils.tensors_to_scalars(log_outputs) + for k, v in log_outputs.items(): + if k not in self.training_losses_meter: + self.training_losses_meter[k] = utils.AvgrageMeter() + if not np.isnan(v): + self.training_losses_meter[k].update(v) + self.training_losses_meter['total_loss'].update(total_loss.item()) + + try: + log_outputs['lr'] = self.scheduler.get_lr() + if isinstance(log_outputs['lr'], list): + log_outputs['lr'] = log_outputs['lr'][0] + except: + pass + + # log_outputs['all_loss'] = total_loss.item() + progress_bar_log = log_outputs + tb_log = {f'tr/{k}': v for k, v in log_outputs.items()} + return { + 'loss': total_loss, + 'progress_bar': progress_bar_log, + 'log': tb_log + } + + def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx): + optimizer.step() + optimizer.zero_grad() + if self.scheduler is not None: + self.scheduler.step(self.global_step // hparams['accumulate_grad_batches']) + + def on_epoch_end(self): + loss_outputs = {k: round(v.avg, 4) for k, v in self.training_losses_meter.items()} + print(f"\n==============\n " + f"Epoch {self.current_epoch} ended. Steps: {self.global_step}. 
{loss_outputs}" + f"\n==============\n") + + def validation_step(self, sample, batch_idx): + """ + + :param sample: + :param batch_idx: + :return: output: dict + """ + raise NotImplementedError + + def _validation_end(self, outputs): + """ + + :param outputs: + :return: loss_output: dict + """ + raise NotImplementedError + + def validation_end(self, outputs): + loss_output = self._validation_end(outputs) + print(f"\n==============\n " + f"valid results: {loss_output}" + f"\n==============\n") + return { + 'log': {f'val/{k}': v for k, v in loss_output.items()}, + 'val_loss': loss_output['total_loss'] + } + + def build_scheduler(self, optimizer): + raise NotImplementedError + + def build_optimizer(self, model): + raise NotImplementedError + + def configure_optimizers(self): + optm = self.build_optimizer(self.model) + self.scheduler = self.build_scheduler(optm) + return [optm] + + def test_start(self): + pass + + def test_step(self, sample, batch_idx): + return self.validation_step(sample, batch_idx) + + def test_end(self, outputs): + return self.validation_end(outputs) + + ########### + # Running configuration + ########### + + @classmethod + def start(cls): + set_hparams() + os.environ['MASTER_PORT'] = str(random.randint(15000, 30000)) + random.seed(hparams['seed']) + np.random.seed(hparams['seed']) + task = cls() + work_dir = hparams['work_dir'] + trainer = BaseTrainer(checkpoint_callback=LatestModelCheckpoint( + filepath=work_dir, + verbose=True, + monitor='val_loss', + mode='min', + num_ckpt_keep=hparams['num_ckpt_keep'], + save_best=hparams['save_best'], + period=1 if hparams['save_ckpt'] else 100000 + ), + logger=TensorBoardLogger( + save_dir=work_dir, + name='lightning_logs', + version='lastest' + ), + gradient_clip_val=hparams['clip_grad_norm'], + val_check_interval=hparams['val_check_interval'], + row_log_interval=hparams['log_interval'], + max_updates=hparams['max_updates'], + num_sanity_val_steps=hparams['num_sanity_val_steps'] if not hparams[ + 'validate'] else 10000, + accumulate_grad_batches=hparams['accumulate_grad_batches']) + if not hparams['infer']: # train + t = datetime.now().strftime('%Y%m%d%H%M%S') + code_dir = f'{work_dir}/codes/{t}' + subprocess.check_call(f'mkdir -p "{code_dir}"', shell=True) + for c in hparams['save_codes']: + subprocess.check_call(f'cp -r "{c}" "{code_dir}/"', shell=True) + print(f"| Copied codes to {code_dir}.") + trainer.checkpoint_callback.task = task + trainer.fit(task) + else: + trainer.test(task) + + def configure_ddp(self, model, device_ids): + model = DDP( + model, + device_ids=device_ids, + find_unused_parameters=True + ) + if dist.get_rank() != 0 and not hparams['debug']: + sys.stdout = open(os.devnull, "w") + sys.stderr = open(os.devnull, "w") + random.seed(hparams['seed']) + np.random.seed(hparams['seed']) + return model + + def training_end(self, *args, **kwargs): + return None + + def init_ddp_connection(self, proc_rank, world_size): + set_hparams(print_hparams=False) + # guarantees unique ports across jobs from same grid search + default_port = 12910 + # if user gave a port number, use that one instead + try: + default_port = os.environ['MASTER_PORT'] + except Exception: + os.environ['MASTER_PORT'] = str(default_port) + + # figure out the root node addr + root_node = '127.0.0.2' + root_node = self.trainer.resolve_root_node_address(root_node) + os.environ['MASTER_ADDR'] = root_node + dist.init_process_group('nccl', rank=proc_rank, world_size=world_size) + + @data_loader + def train_dataloader(self): + return None + + @data_loader + 
def test_dataloader(self): + return None + + @data_loader + def val_dataloader(self): + return None + + def on_load_checkpoint(self, checkpoint): + pass + + def on_save_checkpoint(self, checkpoint): + pass + + def on_sanity_check_start(self): + pass + + def on_train_start(self): + pass + + def on_train_end(self): + pass + + def on_batch_start(self, batch): + pass + + def on_batch_end(self): + pass + + def on_pre_performance_check(self): + pass + + def on_post_performance_check(self): + pass + + def on_before_zero_grad(self, optimizer): + pass + + def on_after_backward(self): + pass + + def backward(self, loss, optimizer): + loss.backward() + + def grad_norm(self, norm_type): + results = {} + total_norm = 0 + for name, p in self.named_parameters(): + if p.requires_grad: + try: + param_norm = p.grad.data.norm(norm_type) + total_norm += param_norm ** norm_type + norm = param_norm ** (1 / norm_type) + + grad = round(norm.data.cpu().numpy().flatten()[0], 3) + results['grad_{}_norm_{}'.format(norm_type, name)] = grad + except Exception: + # this param had no grad + pass + + total_norm = total_norm ** (1. / norm_type) + grad = round(total_norm.data.cpu().numpy().flatten()[0], 3) + results['grad_{}_norm_total'.format(norm_type)] = grad + return results diff --git a/NeuralSeq/tasks/run.py b/NeuralSeq/tasks/run.py new file mode 100644 index 0000000000000000000000000000000000000000..82c7559cec873eebf7c2c0ab6554895e21de7e7c --- /dev/null +++ b/NeuralSeq/tasks/run.py @@ -0,0 +1,15 @@ +import importlib +from utils.hparams import set_hparams, hparams + + +def run_task(): + assert hparams['task_cls'] != '' + pkg = ".".join(hparams["task_cls"].split(".")[:-1]) + cls_name = hparams["task_cls"].split(".")[-1] + task_cls = getattr(importlib.import_module(pkg), cls_name) + task_cls.start() + + +if __name__ == '__main__': + set_hparams() + run_task() diff --git a/NeuralSeq/tasks/svs/__init__.py b/NeuralSeq/tasks/svs/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/NeuralSeq/tasks/svs/diffsinger_task.py b/NeuralSeq/tasks/svs/diffsinger_task.py new file mode 100644 index 0000000000000000000000000000000000000000..78e65447fe78ca80bd0e67ebe5581794fc3764aa --- /dev/null +++ b/NeuralSeq/tasks/svs/diffsinger_task.py @@ -0,0 +1,490 @@ +import torch + +import utils +from utils.hparams import hparams +from modules.diff.net import DiffNet +from modules.diff.shallow_diffusion_tts import GaussianDiffusion, OfflineGaussianDiffusion +from tasks.svs.diffspeech_task import DiffSpeechTask +from vocoders.base_vocoder import get_vocoder_cls, BaseVocoder +from modules.fastspeech.pe import PitchExtractor +from modules.fastspeech.fs2 import FastSpeech2 +from modules.diffsinger_midi.fs2 import FastSpeech2MIDI +from modules.fastspeech.tts_modules import mel2ph_to_dur + +from modules.diff.candidate_decoder import FFT +from utils.pitch_utils import denorm_f0 +from tasks.tts.fs2_utils import FastSpeechDataset +from tasks.tts.fs2 import FastSpeech2Task + +import numpy as np +import os +import torch.nn.functional as F + +DIFF_DECODERS = { + 'wavenet': lambda hp: DiffNet(hp['audio_num_mel_bins']), + 'fft': lambda hp: FFT( + hp['hidden_size'], hp['dec_layers'], hp['dec_ffn_kernel_size'], hp['num_heads']), +} + + +class DiffSingerTask(DiffSpeechTask): + def __init__(self): + super(DiffSingerTask, self).__init__() + self.dataset_cls = FastSpeechDataset + self.vocoder: BaseVocoder = get_vocoder_cls(hparams)() + if hparams.get('pe_enable') is not None and 
hparams['pe_enable']: + self.pe = PitchExtractor().cuda() + utils.load_ckpt(self.pe, hparams['pe_ckpt'], 'model', strict=True) + self.pe.eval() + + def build_tts_model(self): + # import torch + # from tqdm import tqdm + # v_min = torch.ones([80]) * 100 + # v_max = torch.ones([80]) * -100 + # for i, ds in enumerate(tqdm(self.dataset_cls('train'))): + # v_max = torch.max(torch.max(ds['mel'].reshape(-1, 80), 0)[0], v_max) + # v_min = torch.min(torch.min(ds['mel'].reshape(-1, 80), 0)[0], v_min) + # if i % 100 == 0: + # print(i, v_min, v_max) + # print('final', v_min, v_max) + mel_bins = hparams['audio_num_mel_bins'] + self.model = GaussianDiffusion( + phone_encoder=self.phone_encoder, + out_dims=mel_bins, denoise_fn=DIFF_DECODERS[hparams['diff_decoder_type']](hparams), + timesteps=hparams['timesteps'], + K_step=hparams['K_step'], + loss_type=hparams['diff_loss_type'], + spec_min=hparams['spec_min'], spec_max=hparams['spec_max'], + ) + if hparams['fs2_ckpt'] != '': + utils.load_ckpt(self.model.fs2, hparams['fs2_ckpt'], 'model', strict=True) + # self.model.fs2.decoder = None + for k, v in self.model.fs2.named_parameters(): + v.requires_grad = False + + def validation_step(self, sample, batch_idx): + outputs = {} + txt_tokens = sample['txt_tokens'] # [B, T_t] + + target = sample['mels'] # [B, T_s, 80] + energy = sample['energy'] + # fs2_mel = sample['fs2_mels'] + spk_embed = sample.get('spk_embed') if not hparams['use_spk_id'] else sample.get('spk_ids') + mel2ph = sample['mel2ph'] + f0 = sample['f0'] + uv = sample['uv'] + + outputs['losses'] = {} + + outputs['losses'], model_out = self.run_model(self.model, sample, return_output=True, infer=False) + + + outputs['total_loss'] = sum(outputs['losses'].values()) + outputs['nsamples'] = sample['nsamples'] + outputs = utils.tensors_to_scalars(outputs) + if batch_idx < hparams['num_valid_plots']: + model_out = self.model( + txt_tokens, spk_embed=spk_embed, mel2ph=mel2ph, f0=f0, uv=uv, energy=energy, ref_mels=None, infer=True) + + if hparams.get('pe_enable') is not None and hparams['pe_enable']: + gt_f0 = self.pe(sample['mels'])['f0_denorm_pred'] # pe predict from GT mel + pred_f0 = self.pe(model_out['mel_out'])['f0_denorm_pred'] # pe predict from Pred mel + else: + gt_f0 = denorm_f0(sample['f0'], sample['uv'], hparams) + pred_f0 = model_out.get('f0_denorm') + self.plot_wav(batch_idx, sample['mels'], model_out['mel_out'], is_mel=True, gt_f0=gt_f0, f0=pred_f0) + self.plot_mel(batch_idx, sample['mels'], model_out['mel_out'], name=f'diffmel_{batch_idx}') + self.plot_mel(batch_idx, sample['mels'], model_out['fs2_mel'], name=f'fs2mel_{batch_idx}') + return outputs + + +class ShallowDiffusionOfflineDataset(FastSpeechDataset): + def __getitem__(self, index): + sample = super(ShallowDiffusionOfflineDataset, self).__getitem__(index) + item = self._get_item(index) + + if self.prefix != 'train' and hparams['fs2_ckpt'] != '': + fs2_ckpt = os.path.dirname(hparams['fs2_ckpt']) + item_name = item['item_name'] + fs2_mel = torch.Tensor(np.load(f'{fs2_ckpt}/P_mels_npy/{item_name}.npy')) # ~M generated by FFT-singer. 
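+ # At inference, this pre-generated FS2 mel is passed to OfflineGaussianDiffusion as ref_mels=[None, fs2_mel], i.e. the starting point for shallow diffusion.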
+ sample['fs2_mel'] = fs2_mel + return sample + + def collater(self, samples): + batch = super(ShallowDiffusionOfflineDataset, self).collater(samples) + if self.prefix != 'train' and hparams['fs2_ckpt'] != '': + batch['fs2_mels'] = utils.collate_2d([s['fs2_mel'] for s in samples], 0.0) + return batch + + +class DiffSingerOfflineTask(DiffSingerTask): + def __init__(self): + super(DiffSingerOfflineTask, self).__init__() + self.dataset_cls = ShallowDiffusionOfflineDataset + + def build_tts_model(self): + mel_bins = hparams['audio_num_mel_bins'] + self.model = OfflineGaussianDiffusion( + phone_encoder=self.phone_encoder, + out_dims=mel_bins, denoise_fn=DIFF_DECODERS[hparams['diff_decoder_type']](hparams), + timesteps=hparams['timesteps'], + K_step=hparams['K_step'], + loss_type=hparams['diff_loss_type'], + spec_min=hparams['spec_min'], spec_max=hparams['spec_max'], + ) + # if hparams['fs2_ckpt'] != '': + # utils.load_ckpt(self.model.fs2, hparams['fs2_ckpt'], 'model', strict=True) + # self.model.fs2.decoder = None + + def run_model(self, model, sample, return_output=False, infer=False): + txt_tokens = sample['txt_tokens'] # [B, T_t] + target = sample['mels'] # [B, T_s, 80] + mel2ph = sample['mel2ph'] # [B, T_s] + f0 = sample['f0'] + uv = sample['uv'] + energy = sample['energy'] + fs2_mel = None #sample['fs2_mels'] + spk_embed = sample.get('spk_embed') if not hparams['use_spk_id'] else sample.get('spk_ids') + if hparams['pitch_type'] == 'cwt': + cwt_spec = sample[f'cwt_spec'] + f0_mean = sample['f0_mean'] + f0_std = sample['f0_std'] + sample['f0_cwt'] = f0 = model.cwt2f0_norm(cwt_spec, f0_mean, f0_std, mel2ph) + + output = model(txt_tokens, mel2ph=mel2ph, spk_embed=spk_embed, + ref_mels=[target, fs2_mel], f0=f0, uv=uv, energy=energy, infer=infer) + + losses = {} + if 'diff_loss' in output: + losses['mel'] = output['diff_loss'] + # self.add_dur_loss(output['dur'], mel2ph, txt_tokens, losses=losses) + # if hparams['use_pitch_embed']: + # self.add_pitch_loss(output, sample, losses) + if hparams['use_energy_embed']: + self.add_energy_loss(output['energy_pred'], energy, losses) + + if not return_output: + return losses + else: + return losses, output + + def validation_step(self, sample, batch_idx): + outputs = {} + txt_tokens = sample['txt_tokens'] # [B, T_t] + + target = sample['mels'] # [B, T_s, 80] + energy = sample['energy'] + # fs2_mel = sample['fs2_mels'] + spk_embed = sample.get('spk_embed') if not hparams['use_spk_id'] else sample.get('spk_ids') + mel2ph = sample['mel2ph'] + f0 = sample['f0'] + uv = sample['uv'] + + outputs['losses'] = {} + + outputs['losses'], model_out = self.run_model(self.model, sample, return_output=True, infer=False) + + + outputs['total_loss'] = sum(outputs['losses'].values()) + outputs['nsamples'] = sample['nsamples'] + outputs = utils.tensors_to_scalars(outputs) + if batch_idx < hparams['num_valid_plots']: + fs2_mel = sample['fs2_mels'] + model_out = self.model( + txt_tokens, spk_embed=spk_embed, mel2ph=mel2ph, f0=f0, uv=uv, energy=energy, + ref_mels=[None, fs2_mel], infer=True) + if hparams.get('pe_enable') is not None and hparams['pe_enable']: + gt_f0 = self.pe(sample['mels'])['f0_denorm_pred'] # pe predict from GT mel + pred_f0 = self.pe(model_out['mel_out'])['f0_denorm_pred'] # pe predict from Pred mel + else: + gt_f0 = denorm_f0(sample['f0'], sample['uv'], hparams) + pred_f0 = model_out.get('f0_denorm') + self.plot_wav(batch_idx, sample['mels'], model_out['mel_out'], is_mel=True, gt_f0=gt_f0, f0=pred_f0) + self.plot_mel(batch_idx, sample['mels'], 
model_out['mel_out'], name=f'diffmel_{batch_idx}') + self.plot_mel(batch_idx, sample['mels'], fs2_mel, name=f'fs2mel_{batch_idx}') + return outputs + + def test_step(self, sample, batch_idx): + spk_embed = sample.get('spk_embed') if not hparams['use_spk_id'] else sample.get('spk_ids') + txt_tokens = sample['txt_tokens'] + energy = sample['energy'] + if hparams['profile_infer']: + pass + else: + mel2ph, uv, f0 = None, None, None + if hparams['use_gt_dur']: + mel2ph = sample['mel2ph'] + if hparams['use_gt_f0']: + f0 = sample['f0'] + uv = sample['uv'] + fs2_mel = sample['fs2_mels'] + outputs = self.model( + txt_tokens, spk_embed=spk_embed, mel2ph=mel2ph, f0=f0, uv=uv, ref_mels=[None, fs2_mel], energy=energy, + infer=True) + sample['outputs'] = self.model.out2mel(outputs['mel_out']) + sample['mel2ph_pred'] = outputs['mel2ph'] + + if hparams.get('pe_enable') is not None and hparams['pe_enable']: + sample['f0'] = self.pe(sample['mels'])['f0_denorm_pred'] # pe predict from GT mel + sample['f0_pred'] = self.pe(sample['outputs'])['f0_denorm_pred'] # pe predict from Pred mel + else: + sample['f0'] = denorm_f0(sample['f0'], sample['uv'], hparams) + sample['f0_pred'] = outputs.get('f0_denorm') + return self.after_infer(sample) + + +class MIDIDataset(FastSpeechDataset): + def __getitem__(self, index): + sample = super(MIDIDataset, self).__getitem__(index) + item = self._get_item(index) + sample['f0_midi'] = torch.FloatTensor(item['f0_midi']) + sample['pitch_midi'] = torch.LongTensor(item['pitch_midi'])[:hparams['max_frames']] + + return sample + + def collater(self, samples): + batch = super(MIDIDataset, self).collater(samples) + batch['f0_midi'] = utils.collate_1d([s['f0_midi'] for s in samples], 0.0) + batch['pitch_midi'] = utils.collate_1d([s['pitch_midi'] for s in samples], 0) + # print((batch['pitch_midi'] == f0_to_coarse(batch['f0_midi'])).all()) + return batch + + +class OpencpopDataset(FastSpeechDataset): + def __getitem__(self, index): + sample = super(OpencpopDataset, self).__getitem__(index) + item = self._get_item(index) + sample['pitch_midi'] = torch.LongTensor(item['pitch_midi'])[:hparams['max_frames']] + sample['midi_dur'] = torch.FloatTensor(item['midi_dur'])[:hparams['max_frames']] + sample['is_slur'] = torch.LongTensor(item['is_slur'])[:hparams['max_frames']] + sample['word_boundary'] = torch.LongTensor(item['word_boundary'])[:hparams['max_frames']] + return sample + + def collater(self, samples): + batch = super(OpencpopDataset, self).collater(samples) + batch['pitch_midi'] = utils.collate_1d([s['pitch_midi'] for s in samples], 0) + batch['midi_dur'] = utils.collate_1d([s['midi_dur'] for s in samples], 0) + batch['is_slur'] = utils.collate_1d([s['is_slur'] for s in samples], 0) + batch['word_boundary'] = utils.collate_1d([s['word_boundary'] for s in samples], 0) + return batch + + +class DiffSingerMIDITask(DiffSingerTask): + def __init__(self): + super(DiffSingerMIDITask, self).__init__() + # self.dataset_cls = MIDIDataset + self.dataset_cls = OpencpopDataset + + def run_model(self, model, sample, return_output=False, infer=False): + txt_tokens = sample['txt_tokens'] # [B, T_t] + target = sample['mels'] # [B, T_s, 80] + # mel2ph = sample['mel2ph'] if hparams['use_gt_dur'] else None # [B, T_s] + mel2ph = sample['mel2ph'] + if hparams.get('switch_midi2f0_step') is not None and self.global_step > hparams['switch_midi2f0_step']: + f0 = None + uv = None + else: + f0 = sample['f0'] + uv = sample['uv'] + energy = sample['energy'] + + spk_embed = sample.get('spk_embed') if not 
hparams['use_spk_id'] else sample.get('spk_ids') + if hparams['pitch_type'] == 'cwt': + cwt_spec = sample[f'cwt_spec'] + f0_mean = sample['f0_mean'] + f0_std = sample['f0_std'] + sample['f0_cwt'] = f0 = model.cwt2f0_norm(cwt_spec, f0_mean, f0_std, mel2ph) + + output = model(txt_tokens, mel2ph=mel2ph, spk_embed=spk_embed, + ref_mels=target, f0=f0, uv=uv, energy=energy, infer=infer, pitch_midi=sample['pitch_midi'], + midi_dur=sample.get('midi_dur'), is_slur=sample.get('is_slur')) + + losses = {} + if 'diff_loss' in output: + losses['mel'] = output['diff_loss'] + self.add_dur_loss(output['dur'], mel2ph, txt_tokens, sample['word_boundary'], losses=losses) + if hparams['use_pitch_embed']: + self.add_pitch_loss(output, sample, losses) + if hparams['use_energy_embed']: + self.add_energy_loss(output['energy_pred'], energy, losses) + if not return_output: + return losses + else: + return losses, output + + def validation_step(self, sample, batch_idx): + outputs = {} + txt_tokens = sample['txt_tokens'] # [B, T_t] + + target = sample['mels'] # [B, T_s, 80] + energy = sample['energy'] + # fs2_mel = sample['fs2_mels'] + spk_embed = sample.get('spk_embed') if not hparams['use_spk_id'] else sample.get('spk_ids') + mel2ph = sample['mel2ph'] + + outputs['losses'] = {} + + outputs['losses'], model_out = self.run_model(self.model, sample, return_output=True, infer=False) + + outputs['total_loss'] = sum(outputs['losses'].values()) + outputs['nsamples'] = sample['nsamples'] + outputs = utils.tensors_to_scalars(outputs) + if batch_idx < hparams['num_valid_plots']: + model_out = self.model( + txt_tokens, spk_embed=spk_embed, mel2ph=mel2ph, f0=None, uv=None, energy=energy, ref_mels=None, infer=True, + pitch_midi=sample['pitch_midi'], midi_dur=sample.get('midi_dur'), is_slur=sample.get('is_slur')) + + if hparams.get('pe_enable') is not None and hparams['pe_enable']: + gt_f0 = self.pe(sample['mels'])['f0_denorm_pred'] # pe predict from GT mel + pred_f0 = self.pe(model_out['mel_out'])['f0_denorm_pred'] # pe predict from Pred mel + else: + gt_f0 = denorm_f0(sample['f0'], sample['uv'], hparams) + pred_f0 = model_out.get('f0_denorm') + self.plot_wav(batch_idx, sample['mels'], model_out['mel_out'], is_mel=True, gt_f0=gt_f0, f0=pred_f0) + self.plot_mel(batch_idx, sample['mels'], model_out['mel_out'], name=f'diffmel_{batch_idx}') + self.plot_mel(batch_idx, sample['mels'], model_out['fs2_mel'], name=f'fs2mel_{batch_idx}') + if hparams['use_pitch_embed']: + self.plot_pitch(batch_idx, sample, model_out) + return outputs + + def add_dur_loss(self, dur_pred, mel2ph, txt_tokens, wdb, losses=None): + """ + :param dur_pred: [B, T], float, log scale + :param mel2ph: [B, T] + :param txt_tokens: [B, T] + :param losses: + :return: + """ + B, T = txt_tokens.shape + nonpadding = (txt_tokens != 0).float() + dur_gt = mel2ph_to_dur(mel2ph, T).float() * nonpadding + is_sil = torch.zeros_like(txt_tokens).bool() + for p in self.sil_ph: + is_sil = is_sil | (txt_tokens == self.phone_encoder.encode(p)[0]) + is_sil = is_sil.float() # [B, T_txt] + + # phone duration loss + if hparams['dur_loss'] == 'mse': + losses['pdur'] = F.mse_loss(dur_pred, (dur_gt + 1).log(), reduction='none') + losses['pdur'] = (losses['pdur'] * nonpadding).sum() / nonpadding.sum() + dur_pred = (dur_pred.exp() - 1).clamp(min=0) + else: + raise NotImplementedError + + # use linear scale for sent and word duration + if hparams['lambda_word_dur'] > 0: + idx = F.pad(wdb.cumsum(axis=1), (1, 0))[:, :-1] + # word_dur_g = dur_gt.new_zeros([B, idx.max() + 1]).scatter_(1, idx, 
midi_dur) # midi_dur can be implied by add gt-ph_dur + word_dur_p = dur_pred.new_zeros([B, idx.max() + 1]).scatter_add(1, idx, dur_pred) + word_dur_g = dur_gt.new_zeros([B, idx.max() + 1]).scatter_add(1, idx, dur_gt) + wdur_loss = F.mse_loss((word_dur_p + 1).log(), (word_dur_g + 1).log(), reduction='none') + word_nonpadding = (word_dur_g > 0).float() + wdur_loss = (wdur_loss * word_nonpadding).sum() / word_nonpadding.sum() + losses['wdur'] = wdur_loss * hparams['lambda_word_dur'] + if hparams['lambda_sent_dur'] > 0: + sent_dur_p = dur_pred.sum(-1) + sent_dur_g = dur_gt.sum(-1) + sdur_loss = F.mse_loss((sent_dur_p + 1).log(), (sent_dur_g + 1).log(), reduction='mean') + losses['sdur'] = sdur_loss.mean() * hparams['lambda_sent_dur'] + + +class AuxDecoderMIDITask(FastSpeech2Task): + def __init__(self): + super().__init__() + # self.dataset_cls = MIDIDataset + self.dataset_cls = OpencpopDataset + + def build_tts_model(self): + if hparams.get('use_midi') is not None and hparams['use_midi']: + self.model = FastSpeech2MIDI(self.phone_encoder) + else: + self.model = FastSpeech2(self.phone_encoder) + + def run_model(self, model, sample, return_output=False): + txt_tokens = sample['txt_tokens'] # [B, T_t] + target = sample['mels'] # [B, T_s, 80] + mel2ph = sample['mel2ph'] # [B, T_s] + f0 = sample['f0'] + uv = sample['uv'] + energy = sample['energy'] + + spk_embed = sample.get('spk_embed') if not hparams['use_spk_id'] else sample.get('spk_ids') + if hparams['pitch_type'] == 'cwt': + cwt_spec = sample[f'cwt_spec'] + f0_mean = sample['f0_mean'] + f0_std = sample['f0_std'] + sample['f0_cwt'] = f0 = model.cwt2f0_norm(cwt_spec, f0_mean, f0_std, mel2ph) + + output = model(txt_tokens, mel2ph=mel2ph, spk_embed=spk_embed, + ref_mels=target, f0=f0, uv=uv, energy=energy, infer=False, pitch_midi=sample['pitch_midi'], + midi_dur=sample.get('midi_dur'), is_slur=sample.get('is_slur')) + + losses = {} + self.add_mel_loss(output['mel_out'], target, losses) + self.add_dur_loss(output['dur'], mel2ph, txt_tokens, sample['word_boundary'], losses=losses) + if hparams['use_pitch_embed']: + self.add_pitch_loss(output, sample, losses) + if hparams['use_energy_embed']: + self.add_energy_loss(output['energy_pred'], energy, losses) + if not return_output: + return losses + else: + return losses, output + + def add_dur_loss(self, dur_pred, mel2ph, txt_tokens, wdb, losses=None): + """ + :param dur_pred: [B, T], float, log scale + :param mel2ph: [B, T] + :param txt_tokens: [B, T] + :param losses: + :return: + """ + B, T = txt_tokens.shape + nonpadding = (txt_tokens != 0).float() + dur_gt = mel2ph_to_dur(mel2ph, T).float() * nonpadding + is_sil = torch.zeros_like(txt_tokens).bool() + for p in self.sil_ph: + is_sil = is_sil | (txt_tokens == self.phone_encoder.encode(p)[0]) + is_sil = is_sil.float() # [B, T_txt] + + # phone duration loss + if hparams['dur_loss'] == 'mse': + losses['pdur'] = F.mse_loss(dur_pred, (dur_gt + 1).log(), reduction='none') + losses['pdur'] = (losses['pdur'] * nonpadding).sum() / nonpadding.sum() + dur_pred = (dur_pred.exp() - 1).clamp(min=0) + else: + raise NotImplementedError + + # use linear scale for sent and word duration + if hparams['lambda_word_dur'] > 0: + idx = F.pad(wdb.cumsum(axis=1), (1, 0))[:, :-1] + # word_dur_g = dur_gt.new_zeros([B, idx.max() + 1]).scatter_(1, idx, midi_dur) # midi_dur can be implied by add gt-ph_dur + word_dur_p = dur_pred.new_zeros([B, idx.max() + 1]).scatter_add(1, idx, dur_pred) + word_dur_g = dur_gt.new_zeros([B, idx.max() + 1]).scatter_add(1, idx, dur_gt) + wdur_loss = 
F.mse_loss((word_dur_p + 1).log(), (word_dur_g + 1).log(), reduction='none') + word_nonpadding = (word_dur_g > 0).float() + wdur_loss = (wdur_loss * word_nonpadding).sum() / word_nonpadding.sum() + losses['wdur'] = wdur_loss * hparams['lambda_word_dur'] + if hparams['lambda_sent_dur'] > 0: + sent_dur_p = dur_pred.sum(-1) + sent_dur_g = dur_gt.sum(-1) + sdur_loss = F.mse_loss((sent_dur_p + 1).log(), (sent_dur_g + 1).log(), reduction='mean') + losses['sdur'] = sdur_loss.mean() * hparams['lambda_sent_dur'] + + def validation_step(self, sample, batch_idx): + outputs = {} + outputs['losses'] = {} + outputs['losses'], model_out = self.run_model(self.model, sample, return_output=True) + outputs['total_loss'] = sum(outputs['losses'].values()) + outputs['nsamples'] = sample['nsamples'] + mel_out = self.model.out2mel(model_out['mel_out']) + outputs = utils.tensors_to_scalars(outputs) + # if sample['mels'].shape[0] == 1: + # self.add_laplace_var(mel_out, sample['mels'], outputs) + if batch_idx < hparams['num_valid_plots']: + self.plot_mel(batch_idx, sample['mels'], mel_out) + self.plot_dur(batch_idx, sample, model_out) + if hparams['use_pitch_embed']: + self.plot_pitch(batch_idx, sample, model_out) + return outputs \ No newline at end of file diff --git a/NeuralSeq/tasks/svs/diffspeech_task.py b/NeuralSeq/tasks/svs/diffspeech_task.py new file mode 100644 index 0000000000000000000000000000000000000000..cf303d54ad882bea00ad02a0191bd0a71e3452fb --- /dev/null +++ b/NeuralSeq/tasks/svs/diffspeech_task.py @@ -0,0 +1,122 @@ +import torch + +import utils +from utils.hparams import hparams +from modules.diff.net import DiffNet +from modules.diff.shallow_diffusion_tts import GaussianDiffusion +from tasks.svs.task import DiffFsTask +from vocoders.base_vocoder import get_vocoder_cls, BaseVocoder +from utils.pitch_utils import denorm_f0 +from tasks.tts.fs2_utils import FastSpeechDataset + +DIFF_DECODERS = { + 'wavenet': lambda hp: DiffNet(hp['audio_num_mel_bins']), +} + + +class DiffSpeechTask(DiffFsTask): + def __init__(self): + super(DiffSpeechTask, self).__init__() + self.dataset_cls = FastSpeechDataset + self.vocoder: BaseVocoder = get_vocoder_cls(hparams)() + + def build_tts_model(self): + mel_bins = hparams['audio_num_mel_bins'] + self.model = GaussianDiffusion( + phone_encoder=self.phone_encoder, + out_dims=mel_bins, denoise_fn=DIFF_DECODERS[hparams['diff_decoder_type']](hparams), + timesteps=hparams['timesteps'], + K_step=hparams['K_step'], + loss_type=hparams['diff_loss_type'], + spec_min=hparams['spec_min'], spec_max=hparams['spec_max'], + ) + if hparams['fs2_ckpt'] != '': + utils.load_ckpt(self.model.fs2, hparams['fs2_ckpt'], 'model', strict=True) + # self.model.fs2.decoder = None + for k, v in self.model.fs2.named_parameters(): + if not 'predictor' in k: + v.requires_grad = False + + def build_optimizer(self, model): + self.optimizer = optimizer = torch.optim.AdamW( + filter(lambda p: p.requires_grad, model.parameters()), + lr=hparams['lr'], + betas=(hparams['optimizer_adam_beta1'], hparams['optimizer_adam_beta2']), + weight_decay=hparams['weight_decay']) + return optimizer + + def run_model(self, model, sample, return_output=False, infer=False): + txt_tokens = sample['txt_tokens'] # [B, T_t] + target = sample['mels'] # [B, T_s, 80] + # mel2ph = sample['mel2ph'] if hparams['use_gt_dur'] else None # [B, T_s] + mel2ph = sample['mel2ph'] + f0 = sample['f0'] + uv = sample['uv'] + energy = sample['energy'] + # fs2_mel = sample['fs2_mels'] + spk_embed = sample.get('spk_embed') if not hparams['use_spk_id'] 
else sample.get('spk_ids') + if hparams['pitch_type'] == 'cwt': + cwt_spec = sample[f'cwt_spec'] + f0_mean = sample['f0_mean'] + f0_std = sample['f0_std'] + sample['f0_cwt'] = f0 = model.cwt2f0_norm(cwt_spec, f0_mean, f0_std, mel2ph) + + output = model(txt_tokens, mel2ph=mel2ph, spk_embed=spk_embed, + ref_mels=target, f0=f0, uv=uv, energy=energy, infer=infer) + + losses = {} + if 'diff_loss' in output: + losses['mel'] = output['diff_loss'] + self.add_dur_loss(output['dur'], mel2ph, txt_tokens, losses=losses) + if hparams['use_pitch_embed']: + self.add_pitch_loss(output, sample, losses) + if hparams['use_energy_embed']: + self.add_energy_loss(output['energy_pred'], energy, losses) + if not return_output: + return losses + else: + return losses, output + + def validation_step(self, sample, batch_idx): + outputs = {} + txt_tokens = sample['txt_tokens'] # [B, T_t] + + energy = sample['energy'] + spk_embed = sample.get('spk_embed') if not hparams['use_spk_id'] else sample.get('spk_ids') + mel2ph = sample['mel2ph'] + f0 = sample['f0'] + uv = sample['uv'] + + outputs['losses'] = {} + + outputs['losses'], model_out = self.run_model(self.model, sample, return_output=True, infer=False) + + + outputs['total_loss'] = sum(outputs['losses'].values()) + outputs['nsamples'] = sample['nsamples'] + outputs = utils.tensors_to_scalars(outputs) + if batch_idx < hparams['num_valid_plots']: + # model_out = self.model( + # txt_tokens, spk_embed=spk_embed, mel2ph=None, f0=None, uv=None, energy=None, ref_mels=None, infer=True) + # self.plot_mel(batch_idx, model_out['mel_out'], model_out['fs2_mel'], name=f'diffspeech_vs_fs2_{batch_idx}') + model_out = self.model( + txt_tokens, spk_embed=spk_embed, mel2ph=mel2ph, f0=f0, uv=uv, energy=energy, ref_mels=None, infer=True) + gt_f0 = denorm_f0(sample['f0'], sample['uv'], hparams) + self.plot_wav(batch_idx, sample['mels'], model_out['mel_out'], is_mel=True, gt_f0=gt_f0, f0=model_out.get('f0_denorm')) + self.plot_mel(batch_idx, sample['mels'], model_out['mel_out']) + return outputs + + ############ + # validation plots + ############ + def plot_wav(self, batch_idx, gt_wav, wav_out, is_mel=False, gt_f0=None, f0=None, name=None): + gt_wav = gt_wav[0].cpu().numpy() + wav_out = wav_out[0].cpu().numpy() + gt_f0 = gt_f0[0].cpu().numpy() + f0 = f0[0].cpu().numpy() + if is_mel: + gt_wav = self.vocoder.spec2wav(gt_wav, f0=gt_f0) + wav_out = self.vocoder.spec2wav(wav_out, f0=f0) + self.logger.experiment.add_audio(f'gt_{batch_idx}', gt_wav, sample_rate=hparams['audio_sample_rate'], global_step=self.global_step) + self.logger.experiment.add_audio(f'wav_{batch_idx}', wav_out, sample_rate=hparams['audio_sample_rate'], global_step=self.global_step) + diff --git a/NeuralSeq/tasks/svs/task.py b/NeuralSeq/tasks/svs/task.py new file mode 100644 index 0000000000000000000000000000000000000000..896970e5318071d0d406f3d2378462cb77356925 --- /dev/null +++ b/NeuralSeq/tasks/svs/task.py @@ -0,0 +1,84 @@ +import torch + +import utils +from modules.diff.diffusion import GaussianDiffusion +from modules.diff.net import DiffNet +from tasks.tts.fs2 import FastSpeech2Task +from utils.hparams import hparams + + +DIFF_DECODERS = { + 'wavenet': lambda hp: DiffNet(hp['audio_num_mel_bins']), +} + + +class DiffFsTask(FastSpeech2Task): + def build_tts_model(self): + mel_bins = hparams['audio_num_mel_bins'] + self.model = GaussianDiffusion( + phone_encoder=self.phone_encoder, + out_dims=mel_bins, denoise_fn=DIFF_DECODERS[hparams['diff_decoder_type']](hparams), + timesteps=hparams['timesteps'], + 
loss_type=hparams['diff_loss_type'], + spec_min=hparams['spec_min'], spec_max=hparams['spec_max'], + ) + + def run_model(self, model, sample, return_output=False, infer=False): + txt_tokens = sample['txt_tokens'] # [B, T_t] + target = sample['mels'] # [B, T_s, 80] + mel2ph = sample['mel2ph'] # [B, T_s] + f0 = sample['f0'] + uv = sample['uv'] + energy = sample['energy'] + spk_embed = sample.get('spk_embed') if not hparams['use_spk_id'] else sample.get('spk_ids') + if hparams['pitch_type'] == 'cwt': + cwt_spec = sample[f'cwt_spec'] + f0_mean = sample['f0_mean'] + f0_std = sample['f0_std'] + sample['f0_cwt'] = f0 = model.cwt2f0_norm(cwt_spec, f0_mean, f0_std, mel2ph) + + output = model(txt_tokens, mel2ph=mel2ph, spk_embed=spk_embed, + ref_mels=target, f0=f0, uv=uv, energy=energy, infer=infer) + + losses = {} + if 'diff_loss' in output: + losses['mel'] = output['diff_loss'] + self.add_dur_loss(output['dur'], mel2ph, txt_tokens, losses=losses) + if hparams['use_pitch_embed']: + self.add_pitch_loss(output, sample, losses) + if hparams['use_energy_embed']: + self.add_energy_loss(output['energy_pred'], energy, losses) + if not return_output: + return losses + else: + return losses, output + + def _training_step(self, sample, batch_idx, _): + log_outputs = self.run_model(self.model, sample) + total_loss = sum([v for v in log_outputs.values() if isinstance(v, torch.Tensor) and v.requires_grad]) + log_outputs['batch_size'] = sample['txt_tokens'].size()[0] + log_outputs['lr'] = self.scheduler.get_lr()[0] + return total_loss, log_outputs + + def validation_step(self, sample, batch_idx): + outputs = {} + outputs['losses'] = {} + outputs['losses'], model_out = self.run_model(self.model, sample, return_output=True, infer=False) + outputs['total_loss'] = sum(outputs['losses'].values()) + outputs['nsamples'] = sample['nsamples'] + outputs = utils.tensors_to_scalars(outputs) + if batch_idx < hparams['num_valid_plots']: + _, model_out = self.run_model(self.model, sample, return_output=True, infer=True) + self.plot_mel(batch_idx, sample['mels'], model_out['mel_out']) + return outputs + + def build_scheduler(self, optimizer): + return torch.optim.lr_scheduler.StepLR(optimizer, hparams['decay_steps'], gamma=0.5) + + def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx): + if optimizer is None: + return + optimizer.step() + optimizer.zero_grad() + if self.scheduler is not None: + self.scheduler.step(self.global_step // hparams['accumulate_grad_batches']) diff --git a/NeuralSeq/tasks/tts/__pycache__/dataset_utils.cpython-38.pyc b/NeuralSeq/tasks/tts/__pycache__/dataset_utils.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bffa151ec062fbbfd7746f13f195945c2f6522af Binary files /dev/null and b/NeuralSeq/tasks/tts/__pycache__/dataset_utils.cpython-38.pyc differ diff --git a/NeuralSeq/tasks/tts/__pycache__/fs2.cpython-37.pyc b/NeuralSeq/tasks/tts/__pycache__/fs2.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..41d1fe773e827fb7e998e0b5c73eda16e45f4d11 Binary files /dev/null and b/NeuralSeq/tasks/tts/__pycache__/fs2.cpython-37.pyc differ diff --git a/NeuralSeq/tasks/tts/__pycache__/fs2.cpython-38.pyc b/NeuralSeq/tasks/tts/__pycache__/fs2.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..818311e1307b764fc666b13d17c5e76cb857da26 Binary files /dev/null and b/NeuralSeq/tasks/tts/__pycache__/fs2.cpython-38.pyc differ diff --git a/NeuralSeq/tasks/tts/__pycache__/fs2_utils.cpython-37.pyc 
b/NeuralSeq/tasks/tts/__pycache__/fs2_utils.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8695c13045b91438396bacf0448ed8350b354aa5 Binary files /dev/null and b/NeuralSeq/tasks/tts/__pycache__/fs2_utils.cpython-37.pyc differ diff --git a/NeuralSeq/tasks/tts/__pycache__/fs2_utils.cpython-38.pyc b/NeuralSeq/tasks/tts/__pycache__/fs2_utils.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..56d3d4b025876057d8d4c67305861ee146d5917c Binary files /dev/null and b/NeuralSeq/tasks/tts/__pycache__/fs2_utils.cpython-38.pyc differ diff --git a/NeuralSeq/tasks/tts/__pycache__/tts.cpython-37.pyc b/NeuralSeq/tasks/tts/__pycache__/tts.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..923a8cbd1eed14df5f1121a509ee5561fd815c83 Binary files /dev/null and b/NeuralSeq/tasks/tts/__pycache__/tts.cpython-37.pyc differ diff --git a/NeuralSeq/tasks/tts/__pycache__/tts.cpython-38.pyc b/NeuralSeq/tasks/tts/__pycache__/tts.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bbb08d593628c30b0e0e90ab9f8529c5b538716b Binary files /dev/null and b/NeuralSeq/tasks/tts/__pycache__/tts.cpython-38.pyc differ diff --git a/NeuralSeq/tasks/tts/__pycache__/tts_utils.cpython-38.pyc b/NeuralSeq/tasks/tts/__pycache__/tts_utils.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..69242952c919f4ae9f55ca244b05a92f53f825cc Binary files /dev/null and b/NeuralSeq/tasks/tts/__pycache__/tts_utils.cpython-38.pyc differ diff --git a/NeuralSeq/tasks/tts/dataset_utils.py b/NeuralSeq/tasks/tts/dataset_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..9e31ce3aba637a5c373caf1559310ec029338533 --- /dev/null +++ b/NeuralSeq/tasks/tts/dataset_utils.py @@ -0,0 +1,259 @@ +from utils.cwt import get_lf0_cwt +import torch.optim +import torch.utils.data +import importlib +from utils.indexed_datasets import IndexedDataset +from utils.pitch_utils import norm_interp_f0, denorm_f0, f0_to_coarse +import numpy as np +from tasks.base_task import BaseDataset +import torch +import torch.optim +import torch.utils.data +import utils +import torch.distributions +from utils.hparams import hparams +from resemblyzer import VoiceEncoder +import json +from data_gen.tts.data_gen_utils import build_phone_encoder + +class BaseTTSDataset(BaseDataset): + def __init__(self, prefix, shuffle=False, test_items=None, test_sizes=None, data_dir=None): + super().__init__(shuffle) + self.data_dir = hparams['binary_data_dir'] if data_dir is None else data_dir + self.prefix = prefix + self.hparams = hparams + self.indexed_ds = None + self.ext_mel2ph = None + + def load_size(): + self.sizes = np.load(f'{self.data_dir}/{self.prefix}_lengths.npy') + + if prefix == 'test': + if test_items is not None: + self.indexed_ds, self.sizes = test_items, test_sizes + else: + load_size() + if hparams['num_test_samples'] > 0: + self.avail_idxs = [x for x in range(hparams['num_test_samples']) \ + if x < len(self.sizes)] + if len(hparams['test_ids']) > 0: + self.avail_idxs = hparams['test_ids'] + self.avail_idxs + else: + self.avail_idxs = list(range(len(self.sizes))) + else: + load_size() + self.avail_idxs = list(range(len(self.sizes))) + + if hparams['min_frames'] > 0: + self.avail_idxs = [ + x for x in self.avail_idxs if self.sizes[x] >= hparams['min_frames']] + self.sizes = [self.sizes[i] for i in self.avail_idxs] + + def _get_item(self, index): + if hasattr(self, 'avail_idxs') and self.avail_idxs is not None: + 
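# Map the requested index onto self.avail_idxs, the filtered/reordered index list built in __init__. +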
index = self.avail_idxs[index] + if self.indexed_ds is None: + self.indexed_ds = IndexedDataset(f'{self.data_dir}/{self.prefix}') + return self.indexed_ds[index] + + def __getitem__(self, index): + hparams = self.hparams + item = self._get_item(index) + assert len(item['mel']) == self.sizes[index], (len(item['mel']), self.sizes[index]) + max_frames = hparams['max_frames'] + spec = torch.Tensor(item['mel'])[:max_frames] + max_frames = spec.shape[0] // hparams['frames_multiple'] * hparams['frames_multiple'] + spec = spec[:max_frames] + phone = torch.LongTensor(item['phone'][:hparams['max_input_tokens']]) + sample = { + "id": index, + "item_name": item['item_name'], + "text": item['txt'], + "txt_token": phone, + "mel": spec, + "mel_nonpadding": spec.abs().sum(-1) > 0, + } + if hparams['use_spk_embed']: + sample["spk_embed"] = torch.Tensor(item['spk_embed']) + if hparams['use_spk_id']: + sample["spk_id"] = int(item['spk_id']) + return sample + + def collater(self, samples): + if len(samples) == 0: + return {} + hparams = self.hparams + id = torch.LongTensor([s['id'] for s in samples]) + item_names = [s['item_name'] for s in samples] + text = [s['text'] for s in samples] + txt_tokens = utils.collate_1d([s['txt_token'] for s in samples], 0) + mels = utils.collate_2d([s['mel'] for s in samples], 0.0) + txt_lengths = torch.LongTensor([s['txt_token'].numel() for s in samples]) + mel_lengths = torch.LongTensor([s['mel'].shape[0] for s in samples]) + + batch = { + 'id': id, + 'item_name': item_names, + 'nsamples': len(samples), + 'text': text, + 'txt_tokens': txt_tokens, + 'txt_lengths': txt_lengths, + 'mels': mels, + 'mel_lengths': mel_lengths, + } + + if hparams['use_spk_embed']: + spk_embed = torch.stack([s['spk_embed'] for s in samples]) + batch['spk_embed'] = spk_embed + if hparams['use_spk_id']: + spk_ids = torch.LongTensor([s['spk_id'] for s in samples]) + batch['spk_ids'] = spk_ids + return batch + + +class FastSpeechDataset(BaseTTSDataset): + def __init__(self, prefix, shuffle=False, test_items=None, test_sizes=None, data_dir=None): + super().__init__(prefix, shuffle, test_items, test_sizes, data_dir) + self.f0_mean, self.f0_std = hparams.get('f0_mean', None), hparams.get('f0_std', None) + if prefix == 'test' and hparams['test_input_dir'] != '': + self.data_dir = hparams['test_input_dir'] + self.indexed_ds = IndexedDataset(f'{self.data_dir}/{self.prefix}') + self.indexed_ds = sorted(self.indexed_ds, key=lambda item: item['item_name']) + items = {} + for i in range(len(self.indexed_ds)): + speaker = self.indexed_ds[i]['item_name'].split('_')[0] + if speaker not in items.keys(): + items[speaker] = [i] + else: + items[speaker].append(i) + sort_item = sorted(items.values(), key=lambda item_pre_speaker: len(item_pre_speaker), reverse=True) + self.avail_idxs = [n for a in sort_item for n in a][:hparams['num_test_samples']] + self.indexed_ds, self.sizes = self.load_test_inputs() + self.avail_idxs = [i for i in range(hparams['num_test_samples'])] + + if hparams['pitch_type'] == 'cwt': + _, hparams['cwt_scales'] = get_lf0_cwt(np.ones(10)) + + def __getitem__(self, index): + sample = super(FastSpeechDataset, self).__getitem__(index) + item = self._get_item(index) + hparams = self.hparams + max_frames = hparams['max_frames'] + spec = sample['mel'] + T = spec.shape[0] + phone = sample['txt_token'] + sample['energy'] = (spec.exp() ** 2).sum(-1).sqrt() + sample['mel2ph'] = mel2ph = torch.LongTensor(item['mel2ph'])[:T] if 'mel2ph' in item else None + if hparams['use_pitch_embed']: + assert 'f0' in item + 
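# Optionally rescale voiced f0 to the global f0_mean / f0_std from hparams (then clip to 60-500 Hz) before coarse quantization. +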
if hparams.get('normalize_pitch', False): + f0 = item["f0"] + if (f0 > 0).sum() > 0 and f0[f0 > 0].std() > 0: + f0[f0 > 0] = (f0[f0 > 0] - f0[f0 > 0].mean()) / f0[f0 > 0].std() * hparams['f0_std'] + \ + hparams['f0_mean'] + f0[f0 > 0] = f0[f0 > 0].clip(min=60, max=500) + pitch = f0_to_coarse(f0) + pitch = torch.LongTensor(pitch[:max_frames]) + else: + pitch = torch.LongTensor(item.get("pitch"))[:max_frames] if "pitch" in item else None + f0, uv = norm_interp_f0(item["f0"][:max_frames], hparams) + uv = torch.FloatTensor(uv) + f0 = torch.FloatTensor(f0)
 + if hparams['pitch_type'] == 'cwt': + cwt_spec = torch.Tensor(item['cwt_spec'])[:max_frames] + f0_mean = item.get('f0_mean', item.get('cwt_mean')) + f0_std = item.get('f0_std', item.get('cwt_std')) + sample.update({"cwt_spec": cwt_spec, "f0_mean": f0_mean, "f0_std": f0_std}) + elif hparams['pitch_type'] == 'ph': + if "f0_ph" in item: + f0 = torch.FloatTensor(item['f0_ph']) + else: + f0 = denorm_f0(f0, None, hparams) + f0_phlevel_sum = torch.zeros_like(phone).float().scatter_add(0, mel2ph - 1, f0) + f0_phlevel_num = torch.zeros_like(phone).float().scatter_add( + 0, mel2ph - 1, torch.ones_like(f0)).clamp_min(1) + f0_ph = f0_phlevel_sum / f0_phlevel_num + f0, uv = norm_interp_f0(f0_ph, hparams)
 + else: + f0 = uv = torch.zeros_like(mel2ph) + pitch = None + sample["f0"], sample["uv"], sample["pitch"] = f0, uv, pitch + if hparams['use_spk_embed']: + sample["spk_embed"] = torch.Tensor(item['spk_embed']) + if hparams['use_spk_id']: + sample["spk_id"] = item['spk_id'] + return sample + + def collater(self, samples): + if len(samples) == 0: + return {} + hparams = self.hparams + batch = super(FastSpeechDataset, self).collater(samples) + f0 = utils.collate_1d([s['f0'] for s in samples], 0.0) + pitch = utils.collate_1d([s['pitch'] for s in samples]) if samples[0]['pitch'] is not None else None + uv = utils.collate_1d([s['uv'] for s in samples]) + energy = utils.collate_1d([s['energy'] for s in samples], 0.0) + mel2ph = utils.collate_1d([s['mel2ph'] for s in samples], 0.0) \ + if samples[0]['mel2ph'] is not None else None
 + batch.update({ + 'mel2ph': mel2ph, + 'energy': energy, + 'pitch': pitch, + 'f0': f0, + 'uv': uv, + }) + if hparams['pitch_type'] == 'cwt': + cwt_spec = utils.collate_2d([s['cwt_spec'] for s in samples]) + f0_mean = torch.Tensor([s['f0_mean'] for s in samples]) + f0_std = torch.Tensor([s['f0_std'] for s in samples]) + batch.update({'cwt_spec': cwt_spec, 'f0_mean': f0_mean, 'f0_std': f0_std}) + return batch + + def load_test_inputs(self): + binarizer_cls = hparams.get("binarizer_cls", 'data_gen.tts.base_binarizer.BaseBinarizer') + pkg = ".".join(binarizer_cls.split(".")[:-1]) + cls_name = binarizer_cls.split(".")[-1] + binarizer_cls = getattr(importlib.import_module(pkg), cls_name) + ph_set_fn = f"{hparams['binary_data_dir']}/phone_set.json" + ph_set = json.load(open(ph_set_fn, 'r')) + print("| phone set: ", ph_set) + phone_encoder = build_phone_encoder(hparams['binary_data_dir']) + word_encoder = None + voice_encoder = VoiceEncoder().cuda() + encoder = [phone_encoder, word_encoder] + sizes = [] + items = [] + for i in range(len(self.avail_idxs)): + item = self._get_item(i) + + item2tgfn = f"{hparams['test_input_dir'].replace('binary', 'processed')}/mfa_outputs/{item['item_name']}.TextGrid" + item = binarizer_cls.process_item(item['item_name'], item['ph'], item['txt'], item2tgfn, + item['wav_fn'], item['spk_id'], encoder, hparams['binarization_args']) + item['spk_embed'] = voice_encoder.embed_utterance(item['wav']) \ + if 
hparams['binarization_args']['with_spk_embed'] else None # whether to save the speaker embedding file + items.append(item) + sizes.append(item['len']) + return items, sizes + +class FastSpeechWordDataset(FastSpeechDataset): + def __getitem__(self, index): + sample = super(FastSpeechWordDataset, self).__getitem__(index) + item = self._get_item(index) + max_frames = hparams['max_frames'] + sample["ph_words"] = item["ph_words"] + sample["word_tokens"] = torch.LongTensor(item["word_tokens"]) + sample["mel2word"] = torch.LongTensor(item.get("mel2word"))[:max_frames] + sample["ph2word"] = torch.LongTensor(item['ph2word'][:hparams['max_input_tokens']]) + return sample + + def collater(self, samples): + batch = super(FastSpeechWordDataset, self).collater(samples) + ph_words = [s['ph_words'] for s in samples] + batch['ph_words'] = ph_words + word_tokens = utils.collate_1d([s['word_tokens'] for s in samples], 0) + batch['word_tokens'] = word_tokens + mel2word = utils.collate_1d([s['mel2word'] for s in samples], 0) + batch['mel2word'] = mel2word + ph2word = utils.collate_1d([s['ph2word'] for s in samples], 0) + batch['ph2word'] = ph2word + return batch diff --git a/NeuralSeq/tasks/tts/fs2.py b/NeuralSeq/tasks/tts/fs2.py new file mode 100644 index 0000000000000000000000000000000000000000..620d7f3faa53a5326ef97707b9de53506ab059bb --- /dev/null +++ b/NeuralSeq/tasks/tts/fs2.py @@ -0,0 +1,509 @@ +import matplotlib +matplotlib.use('Agg') +from utils import audio +import matplotlib.pyplot as plt +from data_gen.tts.data_gen_utils import get_pitch +from tasks.tts.fs2_utils import FastSpeechDataset +from utils.cwt import cwt2f0 +from utils.pl_utils import data_loader +import os +from multiprocessing.pool import Pool +from tqdm import tqdm +from modules.fastspeech.tts_modules import mel2ph_to_dur +from utils.hparams import hparams +from utils.plot import spec_to_figure, dur_to_figure, f0_to_figure +from utils.pitch_utils import denorm_f0 +from modules.fastspeech.fs2 import FastSpeech2 +from tasks.tts.tts import TtsTask +import torch +import torch.optim +import torch.utils.data +import torch.nn.functional as F +import utils +import torch.distributions +import numpy as np +from modules.commons.ssim import ssim + +class FastSpeech2Task(TtsTask): + def __init__(self): + super(FastSpeech2Task, self).__init__() + self.dataset_cls = FastSpeechDataset + self.mse_loss_fn = torch.nn.MSELoss() + mel_losses = hparams['mel_loss'].split("|") + self.loss_and_lambda = {} + for i, l in enumerate(mel_losses): + if l == '': + continue + if ':' in l: + l, lbd = l.split(":") + lbd = float(lbd) + else: + lbd = 1.0 + self.loss_and_lambda[l] = lbd + print("| Mel losses:", self.loss_and_lambda) + self.sil_ph = self.phone_encoder.sil_phonemes() + + @data_loader + def train_dataloader(self): + train_dataset = self.dataset_cls(hparams['train_set_name'], shuffle=True) + return self.build_dataloader(train_dataset, True, self.max_tokens, self.max_sentences, + endless=hparams['endless_ds']) + + @data_loader + def val_dataloader(self): + valid_dataset = self.dataset_cls(hparams['valid_set_name'], shuffle=False) + return self.build_dataloader(valid_dataset, False, self.max_eval_tokens, self.max_eval_sentences) + + @data_loader + def test_dataloader(self): + test_dataset = self.dataset_cls(hparams['test_set_name'], shuffle=False) + return self.build_dataloader(test_dataset, False, self.max_eval_tokens, + self.max_eval_sentences, batch_by_size=False) + + def build_tts_model(self): + self.model = FastSpeech2(self.phone_encoder) + + def build_model(self): + 
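# Build the TTS model, optionally warm-start it from hparams['load_ckpt'], and print the architecture. +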
self.build_tts_model() + if hparams['load_ckpt'] != '': + self.load_ckpt(hparams['load_ckpt'], strict=True) + utils.print_arch(self.model) + return self.model + + def _training_step(self, sample, batch_idx, _): + loss_output = self.run_model(self.model, sample) + total_loss = sum([v for v in loss_output.values() if isinstance(v, torch.Tensor) and v.requires_grad]) + loss_output['batch_size'] = sample['txt_tokens'].size()[0] + return total_loss, loss_output + + def validation_step(self, sample, batch_idx): + outputs = {} + outputs['losses'] = {} + outputs['losses'], model_out = self.run_model(self.model, sample, return_output=True) + outputs['total_loss'] = sum(outputs['losses'].values()) + outputs['nsamples'] = sample['nsamples'] + mel_out = self.model.out2mel(model_out['mel_out']) + outputs = utils.tensors_to_scalars(outputs) + # if sample['mels'].shape[0] == 1: + # self.add_laplace_var(mel_out, sample['mels'], outputs) + if batch_idx < hparams['num_valid_plots']: + self.plot_mel(batch_idx, sample['mels'], mel_out) + self.plot_dur(batch_idx, sample, model_out) + if hparams['use_pitch_embed']: + self.plot_pitch(batch_idx, sample, model_out) + return outputs + + def _validation_end(self, outputs): + all_losses_meter = { + 'total_loss': utils.AvgrageMeter(), + } + for output in outputs: + n = output['nsamples'] + for k, v in output['losses'].items(): + if k not in all_losses_meter: + all_losses_meter[k] = utils.AvgrageMeter() + all_losses_meter[k].update(v, n) + all_losses_meter['total_loss'].update(output['total_loss'], n) + return {k: round(v.avg, 4) for k, v in all_losses_meter.items()} + + def run_model(self, model, sample, return_output=False): + txt_tokens = sample['txt_tokens'] # [B, T_t] + target = sample['mels'] # [B, T_s, 80] + mel2ph = sample['mel2ph'] # [B, T_s] + f0 = sample['f0'] + uv = sample['uv'] + energy = sample['energy'] + spk_embed = sample.get('spk_embed') if not hparams['use_spk_id'] else sample.get('spk_ids') + if hparams['pitch_type'] == 'cwt': + cwt_spec = sample[f'cwt_spec'] + f0_mean = sample['f0_mean'] + f0_std = sample['f0_std'] + sample['f0_cwt'] = f0 = model.cwt2f0_norm(cwt_spec, f0_mean, f0_std, mel2ph) + + output = model(txt_tokens, mel2ph=mel2ph, spk_embed=spk_embed, + ref_mels=target, f0=f0, uv=uv, energy=energy, infer=False) + + losses = {} + self.add_mel_loss(output['mel_out'], target, losses) + self.add_dur_loss(output['dur'], mel2ph, txt_tokens, losses=losses) + if hparams['use_pitch_embed']: + self.add_pitch_loss(output, sample, losses) + if hparams['use_energy_embed']: + self.add_energy_loss(output['energy_pred'], energy, losses) + if not return_output: + return losses + else: + return losses, output + + ############ + # losses + ############ + def add_mel_loss(self, mel_out, target, losses, postfix='', mel_mix_loss=None): + if mel_mix_loss is None: + for loss_name, lbd in self.loss_and_lambda.items(): + if 'l1' == loss_name: + l = self.l1_loss(mel_out, target) + elif 'mse' == loss_name: + raise NotImplementedError + elif 'ssim' == loss_name: + l = self.ssim_loss(mel_out, target) + elif 'gdl' == loss_name: + raise NotImplementedError + losses[f'{loss_name}{postfix}'] = l * lbd + else: + raise NotImplementedError + + def l1_loss(self, decoder_output, target): + # decoder_output : B x T x n_mel + # target : B x T x n_mel + l1_loss = F.l1_loss(decoder_output, target, reduction='none') + weights = self.weights_nonzero_speech(target) + l1_loss = (l1_loss * weights).sum() / weights.sum() + return l1_loss + + def ssim_loss(self, decoder_output, target, 
bias=6.0): + # decoder_output : B x T x n_mel + # target : B x T x n_mel + assert decoder_output.shape == target.shape + weights = self.weights_nonzero_speech(target) + decoder_output = decoder_output[:, None] + bias + target = target[:, None] + bias + ssim_loss = 1 - ssim(decoder_output, target, size_average=False) + ssim_loss = (ssim_loss * weights).sum() / weights.sum() + return ssim_loss + + def add_dur_loss(self, dur_pred, mel2ph, txt_tokens, losses=None): + """ + + :param dur_pred: [B, T], float, log scale + :param mel2ph: [B, T] + :param txt_tokens: [B, T] + :param losses: + :return: + """ + B, T = txt_tokens.shape + nonpadding = (txt_tokens != 0).float() + dur_gt = mel2ph_to_dur(mel2ph, T).float() * nonpadding + is_sil = torch.zeros_like(txt_tokens).bool() + for p in self.sil_ph: + is_sil = is_sil | (txt_tokens == self.phone_encoder.encode(p)[0]) + is_sil = is_sil.float() # [B, T_txt] + + # phone duration loss + if hparams['dur_loss'] == 'mse': + losses['pdur'] = F.mse_loss(dur_pred, (dur_gt + 1).log(), reduction='none') + losses['pdur'] = (losses['pdur'] * nonpadding).sum() / nonpadding.sum() + dur_pred = (dur_pred.exp() - 1).clamp(min=0) + elif hparams['dur_loss'] == 'mog': + return NotImplementedError + elif hparams['dur_loss'] == 'crf': + losses['pdur'] = -self.model.dur_predictor.crf( + dur_pred, dur_gt.long().clamp(min=0, max=31), mask=nonpadding > 0, reduction='mean') + losses['pdur'] = losses['pdur'] * hparams['lambda_ph_dur'] + + # use linear scale for sent and word duration + if hparams['lambda_word_dur'] > 0: + word_id = (is_sil.cumsum(-1) * (1 - is_sil)).long() + word_dur_p = dur_pred.new_zeros([B, word_id.max() + 1]).scatter_add(1, word_id, dur_pred)[:, 1:] + word_dur_g = dur_gt.new_zeros([B, word_id.max() + 1]).scatter_add(1, word_id, dur_gt)[:, 1:] + wdur_loss = F.mse_loss((word_dur_p + 1).log(), (word_dur_g + 1).log(), reduction='none') + word_nonpadding = (word_dur_g > 0).float() + wdur_loss = (wdur_loss * word_nonpadding).sum() / word_nonpadding.sum() + losses['wdur'] = wdur_loss * hparams['lambda_word_dur'] + if hparams['lambda_sent_dur'] > 0: + sent_dur_p = dur_pred.sum(-1) + sent_dur_g = dur_gt.sum(-1) + sdur_loss = F.mse_loss((sent_dur_p + 1).log(), (sent_dur_g + 1).log(), reduction='mean') + losses['sdur'] = sdur_loss.mean() * hparams['lambda_sent_dur'] + + def add_pitch_loss(self, output, sample, losses): + if hparams['pitch_type'] == 'ph': + nonpadding = (sample['txt_tokens'] != 0).float() + pitch_loss_fn = F.l1_loss if hparams['pitch_loss'] == 'l1' else F.mse_loss + losses['f0'] = (pitch_loss_fn(output['pitch_pred'][:, :, 0], sample['f0'], + reduction='none') * nonpadding).sum() \ + / nonpadding.sum() * hparams['lambda_f0'] + return + mel2ph = sample['mel2ph'] # [B, T_s] + f0 = sample['f0'] + uv = sample['uv'] + nonpadding = (mel2ph != 0).float() + if hparams['pitch_type'] == 'cwt': + cwt_spec = sample[f'cwt_spec'] + f0_mean = sample['f0_mean'] + f0_std = sample['f0_std'] + cwt_pred = output['cwt'][:, :, :10] + f0_mean_pred = output['f0_mean'] + f0_std_pred = output['f0_std'] + losses['C'] = self.cwt_loss(cwt_pred, cwt_spec) * hparams['lambda_f0'] + if hparams['use_uv']: + assert output['cwt'].shape[-1] == 11 + uv_pred = output['cwt'][:, :, -1] + losses['uv'] = (F.binary_cross_entropy_with_logits(uv_pred, uv, reduction='none') * nonpadding) \ + .sum() / nonpadding.sum() * hparams['lambda_uv'] + losses['f0_mean'] = F.l1_loss(f0_mean_pred, f0_mean) * hparams['lambda_f0'] + losses['f0_std'] = F.l1_loss(f0_std_pred, f0_std) * hparams['lambda_f0'] + if 
hparams['cwt_add_f0_loss']: + f0_cwt_ = self.model.cwt2f0_norm(cwt_pred, f0_mean_pred, f0_std_pred, mel2ph) + self.add_f0_loss(f0_cwt_[:, :, None], f0, uv, losses, nonpadding=nonpadding) + elif hparams['pitch_type'] == 'frame': + self.add_f0_loss(output['pitch_pred'], f0, uv, losses, nonpadding=nonpadding) + + def add_f0_loss(self, p_pred, f0, uv, losses, nonpadding): + assert p_pred[..., 0].shape == f0.shape + if hparams['use_uv']: + assert p_pred[..., 1].shape == uv.shape + losses['uv'] = (F.binary_cross_entropy_with_logits( + p_pred[:, :, 1], uv, reduction='none') * nonpadding).sum() \ + / nonpadding.sum() * hparams['lambda_uv'] + nonpadding = nonpadding * (uv == 0).float() + + f0_pred = p_pred[:, :, 0] + if hparams['pitch_loss'] in ['l1', 'l2']: + pitch_loss_fn = F.l1_loss if hparams['pitch_loss'] == 'l1' else F.mse_loss + losses['f0'] = (pitch_loss_fn(f0_pred, f0, reduction='none') * nonpadding).sum() \ + / nonpadding.sum() * hparams['lambda_f0'] + elif hparams['pitch_loss'] == 'ssim': + return NotImplementedError + + def cwt_loss(self, cwt_p, cwt_g): + if hparams['cwt_loss'] == 'l1': + return F.l1_loss(cwt_p, cwt_g) + if hparams['cwt_loss'] == 'l2': + return F.mse_loss(cwt_p, cwt_g) + if hparams['cwt_loss'] == 'ssim': + return self.ssim_loss(cwt_p, cwt_g, 20) + + def add_energy_loss(self, energy_pred, energy, losses): + nonpadding = (energy != 0).float() + loss = (F.mse_loss(energy_pred, energy, reduction='none') * nonpadding).sum() / nonpadding.sum() + loss = loss * hparams['lambda_energy'] + losses['e'] = loss + + + ############ + # validation plots + ############ + def plot_mel(self, batch_idx, spec, spec_out, name=None): + spec_cat = torch.cat([spec, spec_out], -1) + name = f'mel_{batch_idx}' if name is None else name + vmin = hparams['mel_vmin'] + vmax = hparams['mel_vmax'] + self.logger.experiment.add_figure(name, spec_to_figure(spec_cat[0], vmin, vmax), self.global_step) + + def plot_dur(self, batch_idx, sample, model_out): + T_txt = sample['txt_tokens'].shape[1] + dur_gt = mel2ph_to_dur(sample['mel2ph'], T_txt)[0] + dur_pred = self.model.dur_predictor.out2dur(model_out['dur']).float() + txt = self.phone_encoder.decode(sample['txt_tokens'][0].cpu().numpy()) + txt = txt.split(" ") + self.logger.experiment.add_figure( + f'dur_{batch_idx}', dur_to_figure(dur_gt, dur_pred, txt), self.global_step) + + def plot_pitch(self, batch_idx, sample, model_out): + f0 = sample['f0'] + if hparams['pitch_type'] == 'ph': + mel2ph = sample['mel2ph'] + f0 = self.expand_f0_ph(f0, mel2ph) + f0_pred = self.expand_f0_ph(model_out['pitch_pred'][:, :, 0], mel2ph) + self.logger.experiment.add_figure( + f'f0_{batch_idx}', f0_to_figure(f0[0], None, f0_pred[0]), self.global_step) + return + f0 = denorm_f0(f0, sample['uv'], hparams) + if hparams['pitch_type'] == 'cwt': + # cwt + cwt_out = model_out['cwt'] + cwt_spec = cwt_out[:, :, :10] + cwt = torch.cat([cwt_spec, sample['cwt_spec']], -1) + self.logger.experiment.add_figure(f'cwt_{batch_idx}', spec_to_figure(cwt[0]), self.global_step) + # f0 + f0_pred = cwt2f0(cwt_spec, model_out['f0_mean'], model_out['f0_std'], hparams['cwt_scales']) + if hparams['use_uv']: + assert cwt_out.shape[-1] == 11 + uv_pred = cwt_out[:, :, -1] > 0 + f0_pred[uv_pred > 0] = 0 + f0_cwt = denorm_f0(sample['f0_cwt'], sample['uv'], hparams) + self.logger.experiment.add_figure( + f'f0_{batch_idx}', f0_to_figure(f0[0], f0_cwt[0], f0_pred[0]), self.global_step) + elif hparams['pitch_type'] == 'frame': + # f0 + uv_pred = model_out['pitch_pred'][:, :, 1] > 0 + pitch_pred = 
denorm_f0(model_out['pitch_pred'][:, :, 0], uv_pred, hparams) + self.logger.experiment.add_figure( + f'f0_{batch_idx}', f0_to_figure(f0[0], None, pitch_pred[0]), self.global_step) + + ############ + # infer + ############ + def test_step(self, sample, batch_idx): + spk_embed = sample.get('spk_embed') if not hparams['use_spk_id'] else sample.get('spk_ids') + txt_tokens = sample['txt_tokens'] + mel2ph, uv, f0 = None, None, None + ref_mels = None + if hparams['profile_infer']: + pass + else: + if hparams['use_gt_dur']: + mel2ph = sample['mel2ph'] + if hparams['use_gt_f0']: + f0 = sample['f0'] + uv = sample['uv'] + print('Here using gt f0!!') + if hparams.get('use_midi') is not None and hparams['use_midi']: + outputs = self.model( + txt_tokens, spk_embed=spk_embed, mel2ph=mel2ph, f0=f0, uv=uv, ref_mels=ref_mels, infer=True, + pitch_midi=sample['pitch_midi'], midi_dur=sample.get('midi_dur'), is_slur=sample.get('is_slur')) + else: + outputs = self.model( + txt_tokens, spk_embed=spk_embed, mel2ph=mel2ph, f0=f0, uv=uv, ref_mels=ref_mels, infer=True) + sample['outputs'] = self.model.out2mel(outputs['mel_out']) + sample['mel2ph_pred'] = outputs['mel2ph'] + if hparams.get('pe_enable') is not None and hparams['pe_enable']: + sample['f0'] = self.pe(sample['mels'])['f0_denorm_pred'] # pe predict from GT mel + sample['f0_pred'] = self.pe(sample['outputs'])['f0_denorm_pred'] # pe predict from Pred mel + else: + sample['f0'] = denorm_f0(sample['f0'], sample['uv'], hparams) + sample['f0_pred'] = outputs.get('f0_denorm') + return self.after_infer(sample) + + def after_infer(self, predictions): + if self.saving_result_pool is None and not hparams['profile_infer']: + self.saving_result_pool = Pool(min(int(os.getenv('N_PROC', os.cpu_count())), 16)) + self.saving_results_futures = [] + predictions = utils.unpack_dict_to_list(predictions) + t = tqdm(predictions) + for num_predictions, prediction in enumerate(t): + for k, v in prediction.items(): + if type(v) is torch.Tensor: + prediction[k] = v.cpu().numpy() + + item_name = prediction.get('item_name') + text = prediction.get('text').replace(":", "%3A")[:80] + + # remove paddings + mel_gt = prediction["mels"] + mel_gt_mask = np.abs(mel_gt).sum(-1) > 0 + mel_gt = mel_gt[mel_gt_mask] + mel2ph_gt = prediction.get("mel2ph") + mel2ph_gt = mel2ph_gt[mel_gt_mask] if mel2ph_gt is not None else None + mel_pred = prediction["outputs"] + mel_pred_mask = np.abs(mel_pred).sum(-1) > 0 + mel_pred = mel_pred[mel_pred_mask] + mel_gt = np.clip(mel_gt, hparams['mel_vmin'], hparams['mel_vmax']) + mel_pred = np.clip(mel_pred, hparams['mel_vmin'], hparams['mel_vmax']) + + mel2ph_pred = prediction.get("mel2ph_pred") + if mel2ph_pred is not None: + if len(mel2ph_pred) > len(mel_pred_mask): + mel2ph_pred = mel2ph_pred[:len(mel_pred_mask)] + mel2ph_pred = mel2ph_pred[mel_pred_mask] + + f0_gt = prediction.get("f0") + f0_pred = prediction.get("f0_pred") + if f0_pred is not None: + f0_gt = f0_gt[mel_gt_mask] + if len(f0_pred) > len(mel_pred_mask): + f0_pred = f0_pred[:len(mel_pred_mask)] + f0_pred = f0_pred[mel_pred_mask] + + str_phs = None + if self.phone_encoder is not None and 'txt_tokens' in prediction: + str_phs = self.phone_encoder.decode(prediction['txt_tokens'], strip_padding=True) + gen_dir = os.path.join(hparams['work_dir'], + f'generated_{self.trainer.global_step}_{hparams["gen_dir_name"]}') + wav_pred = self.vocoder.spec2wav(mel_pred, f0=f0_pred) + if not hparams['profile_infer']: + os.makedirs(gen_dir, exist_ok=True) + os.makedirs(f'{gen_dir}/wavs', exist_ok=True) + 
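# Predicted (P_) and ground-truth (G_) mels are also cached as .npy under work_dir; ShallowDiffusionOfflineDataset later reads P_mels_npy as the FS2 prior. +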
os.makedirs(f'{gen_dir}/plot', exist_ok=True) + os.makedirs(os.path.join(hparams['work_dir'], 'P_mels_npy'), exist_ok=True) + os.makedirs(os.path.join(hparams['work_dir'], 'G_mels_npy'), exist_ok=True) + self.saving_results_futures.append( + self.saving_result_pool.apply_async(self.save_result, args=[ + wav_pred, mel_pred, 'P', item_name, text, gen_dir, str_phs, mel2ph_pred, f0_gt, f0_pred])) + + if mel_gt is not None and hparams['save_gt']: + wav_gt = self.vocoder.spec2wav(mel_gt, f0=f0_gt) + self.saving_results_futures.append( + self.saving_result_pool.apply_async(self.save_result, args=[ + wav_gt, mel_gt, 'G', item_name, text, gen_dir, str_phs, mel2ph_gt, f0_gt, f0_pred])) + if hparams['save_f0']: + import matplotlib.pyplot as plt + # f0_pred_, _ = get_pitch(wav_pred, mel_pred, hparams) + f0_pred_ = f0_pred + f0_gt_, _ = get_pitch(wav_gt, mel_gt, hparams) + fig = plt.figure() + plt.plot(f0_pred_, label=r'$f0_P$') + plt.plot(f0_gt_, label=r'$f0_G$') + if hparams.get('pe_enable') is not None and hparams['pe_enable']: + # f0_midi = prediction.get("f0_midi") + # f0_midi = f0_midi[mel_gt_mask] + # plt.plot(f0_midi, label=r'$f0_M$') + pass + plt.legend() + plt.tight_layout() + plt.savefig(f'{gen_dir}/plot/[F0][{item_name}]{text}.png', format='png') + plt.close(fig) + + t.set_description( + f"Pred_shape: {mel_pred.shape}, gt_shape: {mel_gt.shape}") + else: + if 'gen_wav_time' not in self.stats: + self.stats['gen_wav_time'] = 0 + self.stats['gen_wav_time'] += len(wav_pred) / hparams['audio_sample_rate'] + print('gen_wav_time: ', self.stats['gen_wav_time']) + + return {} + + @staticmethod + def save_result(wav_out, mel, prefix, item_name, text, gen_dir, str_phs=None, mel2ph=None, gt_f0=None, pred_f0=None): + item_name = item_name.replace('/', '-') + base_fn = f'[{item_name}][{prefix}]' + + if text is not None: + base_fn += text + base_fn += ('-' + hparams['exp_name']) + np.save(os.path.join(hparams['work_dir'], f'{prefix}_mels_npy', item_name), mel) + audio.save_wav(wav_out, f'{gen_dir}/wavs/{base_fn}.wav', hparams['audio_sample_rate'], + norm=hparams['out_wav_norm']) + fig = plt.figure(figsize=(14, 10)) + spec_vmin = hparams['mel_vmin'] + spec_vmax = hparams['mel_vmax'] + heatmap = plt.pcolor(mel.T, vmin=spec_vmin, vmax=spec_vmax) + fig.colorbar(heatmap) + if hparams.get('pe_enable') is not None and hparams['pe_enable']: + gt_f0 = (gt_f0 - 100) / (800 - 100) * 80 * (gt_f0 > 0) + pred_f0 = (pred_f0 - 100) / (800 - 100) * 80 * (pred_f0 > 0) + plt.plot(pred_f0, c='white', linewidth=1, alpha=0.6) + plt.plot(gt_f0, c='red', linewidth=1, alpha=0.6) + else: + f0, _ = get_pitch(wav_out, mel, hparams) + f0 = (f0 - 100) / (800 - 100) * 80 * (f0 > 0) + plt.plot(f0, c='white', linewidth=1, alpha=0.6) + if mel2ph is not None and str_phs is not None: + decoded_txt = str_phs.split(" ") + dur = mel2ph_to_dur(torch.LongTensor(mel2ph)[None, :], len(decoded_txt))[0].numpy() + dur = [0] + list(np.cumsum(dur)) + for i in range(len(dur) - 1): + shift = (i % 20) + 1 + plt.text(dur[i], shift, decoded_txt[i]) + plt.hlines(shift, dur[i], dur[i + 1], colors='b' if decoded_txt[i] != '|' else 'black') + plt.vlines(dur[i], 0, 5, colors='b' if decoded_txt[i] != '|' else 'black', + alpha=1, linewidth=1) + plt.tight_layout() + plt.savefig(f'{gen_dir}/plot/{base_fn}.png', format='png', dpi=1000) + plt.close(fig) + + ############## + # utils + ############## + @staticmethod + def expand_f0_ph(f0, mel2ph): + f0 = denorm_f0(f0, None, hparams) + f0 = F.pad(f0, [1, 0]) + f0 = torch.gather(f0, 1, mel2ph) # [B, T_mel] + return f0 + + 
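A minimal sketch, assuming only stock PyTorch, of the two tensor tricks used by `expand_f0_ph` and `add_f0_loss` above: phone-level f0 is broadcast to mel frames by gathering with `mel2ph` (index 0 reserved for padding), and frame-level losses are averaged over a `nonpadding` mask. The tensor shapes below are made up purely for illustration.

import torch
import torch.nn.functional as F

# Toy shapes for illustration only: B=2 items, T_txt=4 phones, T_mel=6 mel frames.
f0_ph = torch.tensor([[100., 120.,   0.,   0.],
                      [ 90., 110., 130.,   0.]])      # phone-level f0, [B, T_txt]
mel2ph = torch.tensor([[1, 1, 2, 2, 0, 0],
                       [1, 2, 2, 3, 3, 0]])           # frame-to-phone index, 0 = padding

# expand_f0_ph-style gather: prepend one slot so padding (index 0) maps to 0 Hz,
# then gather along the phone axis to obtain frame-level f0 (denorm_f0 omitted here).
f0_frame = torch.gather(F.pad(f0_ph, [1, 0]), 1, mel2ph)   # [B, T_mel]
# -> [[100., 100., 120., 120.,   0.,   0.],
#     [ 90., 110., 110., 130., 130.,   0.]]

# add_f0_loss-style masked average: reduce an element-wise loss over non-padded frames only.
f0_pred = f0_frame + torch.randn_like(f0_frame)            # stand-in prediction
nonpadding = (mel2ph > 0).float()
f0_loss = (F.l1_loss(f0_pred, f0_frame, reduction='none') * nonpadding).sum() / nonpadding.sum()
print(f0_frame, f0_loss)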
+if __name__ == '__main__': + FastSpeech2Task.start() diff --git a/NeuralSeq/tasks/tts/fs2_adv.py b/NeuralSeq/tasks/tts/fs2_adv.py new file mode 100644 index 0000000000000000000000000000000000000000..ebf48a8dd79e14fda3e8039d9ac8c00b58d3bbbb --- /dev/null +++ b/NeuralSeq/tasks/tts/fs2_adv.py @@ -0,0 +1,129 @@ +from tasks.tts.fs2 import FastSpeech2Task +from modules.syntaspeech.multi_window_disc import Discriminator +from utils.hparams import hparams +from torch import nn +import torch +import torch.optim +import torch.utils.data +import utils + + +class FastSpeech2AdvTask(FastSpeech2Task): + def build_model(self): + self.build_tts_model() + if hparams['load_ckpt'] != '': + self.load_ckpt(hparams['load_ckpt'], strict=False) + utils.print_arch(self.model, 'Generator') + self.build_disc_model() + if not hasattr(self, 'gen_params'): + self.gen_params = list(self.model.parameters()) + return self.model + + def build_disc_model(self): + disc_win_num = hparams['disc_win_num'] + h = hparams['mel_disc_hidden_size'] + self.mel_disc = Discriminator( + time_lengths=[32, 64, 128][:disc_win_num], + freq_length=80, hidden_size=h, kernel=(3, 3) + ) + self.disc_params = list(self.mel_disc.parameters()) + utils.print_arch(self.mel_disc, model_name='Mel Disc') + + def _training_step(self, sample, batch_idx, optimizer_idx): + log_outputs = {} + loss_weights = {} + disc_start = hparams['mel_gan'] and self.global_step >= hparams["disc_start_steps"] and \ + hparams['lambda_mel_adv'] > 0 + if optimizer_idx == 0: + ####################### + # Generator # + ####################### + log_outputs, model_out = self.run_model(self.model, sample, return_output=True) + self.model_out = {k: v.detach() for k, v in model_out.items() if isinstance(v, torch.Tensor)} + if disc_start: + self.disc_cond = disc_cond = self.model_out['decoder_inp'].detach() \ + if hparams['use_cond_disc'] else None + if hparams['mel_loss_no_noise']: + self.add_mel_loss(model_out['mel_out_nonoise'], sample['mels'], log_outputs) + mel_p = model_out['mel_out'] + if hasattr(self.model, 'out2mel'): + mel_p = self.model.out2mel(mel_p) + o_ = self.mel_disc(mel_p, disc_cond) + p_, pc_ = o_['y'], o_['y_c'] + + if p_ is not None: + log_outputs['a'] = self.mse_loss_fn(p_, p_.new_ones(p_.size())) + loss_weights['a'] = hparams['lambda_mel_adv'] + if pc_ is not None: + log_outputs['ac'] = self.mse_loss_fn(pc_, pc_.new_ones(pc_.size())) + loss_weights['ac'] = hparams['lambda_mel_adv'] + else: + ####################### + # Discriminator # + ####################### + if disc_start and self.global_step % hparams['disc_interval'] == 0: + if hparams['rerun_gen']: + with torch.no_grad(): + _, model_out = self.run_model(self.model, sample, return_output=True) + else: + model_out = self.model_out + mel_g = sample['mels'] + mel_p = model_out['mel_out'] + if hasattr(self.model, 'out2mel'): + mel_p = self.model.out2mel(mel_p) + + o = self.mel_disc(mel_g, self.disc_cond) + p, pc = o['y'], o['y_c'] + o_ = self.mel_disc(mel_p, self.disc_cond) + p_, pc_ = o_['y'], o_['y_c'] + + if p_ is not None: + log_outputs["r"] = self.mse_loss_fn(p, p.new_ones(p.size())) + log_outputs["f"] = self.mse_loss_fn(p_, p_.new_zeros(p_.size())) + + if pc_ is not None: + log_outputs["rc"] = self.mse_loss_fn(pc, pc.new_ones(pc.size())) + log_outputs["fc"] = self.mse_loss_fn(pc_, pc_.new_zeros(pc_.size())) + + if len(log_outputs) == 0: + return None + total_loss = sum([loss_weights.get(k, 1) * v for k, v in log_outputs.items()]) + + log_outputs['bs'] = sample['mels'].shape[0] + return total_loss, 
log_outputs + + def configure_optimizers(self): + if not hasattr(self, 'gen_params'): + self.gen_params = list(self.model.parameters()) + optimizer_gen = torch.optim.AdamW( + self.gen_params, + lr=hparams['lr'], + betas=(hparams['optimizer_adam_beta1'], hparams['optimizer_adam_beta2']), + weight_decay=hparams['weight_decay']) + optimizer_disc = torch.optim.AdamW( + self.disc_params, + lr=hparams['disc_lr'], + betas=(hparams['optimizer_adam_beta1'], hparams['optimizer_adam_beta2']), + **hparams["discriminator_optimizer_params"]) if len(self.disc_params) > 0 else None + self.scheduler = self.build_scheduler({'gen': optimizer_gen, 'disc': optimizer_disc}) + return [optimizer_gen, optimizer_disc] + + def build_scheduler(self, optimizer): + return { + "gen": super().build_scheduler(optimizer['gen']), + "disc": torch.optim.lr_scheduler.StepLR( + optimizer=optimizer["disc"], + **hparams["discriminator_scheduler_params"]) if optimizer["disc"] is not None else None, + } + + def on_before_optimization(self, opt_idx): + if opt_idx == 0: + nn.utils.clip_grad_norm_(self.gen_params, hparams['generator_grad_norm']) + else: + nn.utils.clip_grad_norm_(self.disc_params, hparams["discriminator_grad_norm"]) + + def on_after_optimization(self, epoch, batch_idx, optimizer, optimizer_idx): + if optimizer_idx == 0: + self.scheduler['gen'].step(self.global_step) + else: + self.scheduler['disc'].step(max(self.global_step - hparams["disc_start_steps"], 1)) diff --git a/NeuralSeq/tasks/tts/fs2_utils.py b/NeuralSeq/tasks/tts/fs2_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..092550863d2fd72f008cc790bc6d950340e68182 --- /dev/null +++ b/NeuralSeq/tasks/tts/fs2_utils.py @@ -0,0 +1,173 @@ +import matplotlib + +matplotlib.use('Agg') + +import glob +import importlib +from utils.cwt import get_lf0_cwt +import os +import torch.optim +import torch.utils.data +from utils.indexed_datasets import IndexedDataset +from utils.pitch_utils import norm_interp_f0 +import numpy as np +from tasks.base_task import BaseDataset +import torch +import torch.optim +import torch.utils.data +import utils +import torch.distributions +from utils.hparams import hparams + + +class FastSpeechDataset(BaseDataset): + def __init__(self, prefix, shuffle=False): + super().__init__(shuffle) + self.data_dir = hparams['binary_data_dir'] + self.prefix = prefix + self.hparams = hparams + self.sizes = np.load(f'{self.data_dir}/{self.prefix}_lengths.npy') + self.indexed_ds = None + # self.name2spk_id={} + + # pitch stats + f0_stats_fn = f'{self.data_dir}/train_f0s_mean_std.npy' + if os.path.exists(f0_stats_fn): + hparams['f0_mean'], hparams['f0_std'] = self.f0_mean, self.f0_std = np.load(f0_stats_fn) + hparams['f0_mean'] = float(hparams['f0_mean']) + hparams['f0_std'] = float(hparams['f0_std']) + else: + hparams['f0_mean'], hparams['f0_std'] = self.f0_mean, self.f0_std = None, None + + if prefix == 'test': + if hparams['test_input_dir'] != '': + self.indexed_ds, self.sizes = self.load_test_inputs(hparams['test_input_dir']) + else: + if hparams['num_test_samples'] > 0: + self.avail_idxs = list(range(hparams['num_test_samples'])) + hparams['test_ids'] + self.sizes = [self.sizes[i] for i in self.avail_idxs] + + if hparams['pitch_type'] == 'cwt': + _, hparams['cwt_scales'] = get_lf0_cwt(np.ones(10)) + + def _get_item(self, index): + if hasattr(self, 'avail_idxs') and self.avail_idxs is not None: + index = self.avail_idxs[index] + if self.indexed_ds is None: + self.indexed_ds = IndexedDataset(f'{self.data_dir}/{self.prefix}') + return 
self.indexed_ds[index] + + def __getitem__(self, index): + hparams = self.hparams + item = self._get_item(index) + max_frames = hparams['max_frames'] + spec = torch.Tensor(item['mel'])[:max_frames] + energy = (spec.exp() ** 2).sum(-1).sqrt() + mel2ph = torch.LongTensor(item['mel2ph'])[:max_frames] if 'mel2ph' in item else None + f0, uv = norm_interp_f0(item["f0"][:max_frames], hparams) + phone = torch.LongTensor(item['phone'][:hparams['max_input_tokens']]) + pitch = torch.LongTensor(item.get("pitch"))[:max_frames] + # print(item.keys(), item['mel'].shape, spec.shape) + sample = { + "id": index, + "item_name": item['item_name'], + "text": item['txt'], + "txt_token": phone, + "mel": spec, + "pitch": pitch, + "energy": energy, + "f0": f0, + "uv": uv, + "mel2ph": mel2ph, + "mel_nonpadding": spec.abs().sum(-1) > 0, + } + if self.hparams['use_spk_embed']: + sample["spk_embed"] = torch.Tensor(item['spk_embed']) + if self.hparams['use_spk_id']: + sample["spk_id"] = item['spk_id'] + # sample['spk_id'] = 0 + # for key in self.name2spk_id.keys(): + # if key in item['item_name']: + # sample['spk_id'] = self.name2spk_id[key] + # break + if self.hparams['pitch_type'] == 'cwt': + cwt_spec = torch.Tensor(item['cwt_spec'])[:max_frames] + f0_mean = item.get('f0_mean', item.get('cwt_mean')) + f0_std = item.get('f0_std', item.get('cwt_std')) + sample.update({"cwt_spec": cwt_spec, "f0_mean": f0_mean, "f0_std": f0_std}) + elif self.hparams['pitch_type'] == 'ph': + f0_phlevel_sum = torch.zeros_like(phone).float().scatter_add(0, mel2ph - 1, f0) + f0_phlevel_num = torch.zeros_like(phone).float().scatter_add( + 0, mel2ph - 1, torch.ones_like(f0)).clamp_min(1) + sample["f0_ph"] = f0_phlevel_sum / f0_phlevel_num + return sample + + def collater(self, samples): + if len(samples) == 0: + return {} + id = torch.LongTensor([s['id'] for s in samples]) + item_names = [s['item_name'] for s in samples] + text = [s['text'] for s in samples] + txt_tokens = utils.collate_1d([s['txt_token'] for s in samples], 0) + f0 = utils.collate_1d([s['f0'] for s in samples], 0.0) + pitch = utils.collate_1d([s['pitch'] for s in samples]) + uv = utils.collate_1d([s['uv'] for s in samples]) + energy = utils.collate_1d([s['energy'] for s in samples], 0.0) + mel2ph = utils.collate_1d([s['mel2ph'] for s in samples], 0.0) \ + if samples[0]['mel2ph'] is not None else None + mels = utils.collate_2d([s['mel'] for s in samples], 0.0) + txt_lengths = torch.LongTensor([s['txt_token'].numel() for s in samples]) + mel_lengths = torch.LongTensor([s['mel'].shape[0] for s in samples]) + + batch = { + 'id': id, + 'item_name': item_names, + 'nsamples': len(samples), + 'text': text, + 'txt_tokens': txt_tokens, + 'txt_lengths': txt_lengths, + 'mels': mels, + 'mel_lengths': mel_lengths, + 'mel2ph': mel2ph, + 'energy': energy, + 'pitch': pitch, + 'f0': f0, + 'uv': uv, + } + + if self.hparams['use_spk_embed']: + spk_embed = torch.stack([s['spk_embed'] for s in samples]) + batch['spk_embed'] = spk_embed + if self.hparams['use_spk_id']: + spk_ids = torch.LongTensor([s['spk_id'] for s in samples]) + batch['spk_ids'] = spk_ids + if self.hparams['pitch_type'] == 'cwt': + cwt_spec = utils.collate_2d([s['cwt_spec'] for s in samples]) + f0_mean = torch.Tensor([s['f0_mean'] for s in samples]) + f0_std = torch.Tensor([s['f0_std'] for s in samples]) + batch.update({'cwt_spec': cwt_spec, 'f0_mean': f0_mean, 'f0_std': f0_std}) + elif self.hparams['pitch_type'] == 'ph': + batch['f0'] = utils.collate_1d([s['f0_ph'] for s in samples]) + + return batch + + def 
load_test_inputs(self, test_input_dir, spk_id=0): + inp_wav_paths = glob.glob(f'{test_input_dir}/*.wav') + glob.glob(f'{test_input_dir}/*.mp3') + sizes = [] + items = [] + + binarizer_cls = hparams.get("binarizer_cls", 'data_gen.tts.base_binarizerr.BaseBinarizer') + pkg = ".".join(binarizer_cls.split(".")[:-1]) + cls_name = binarizer_cls.split(".")[-1] + binarizer_cls = getattr(importlib.import_module(pkg), cls_name) + binarization_args = hparams['binarization_args'] + + for wav_fn in inp_wav_paths: + item_name = os.path.basename(wav_fn) + ph = txt = tg_fn = '' + wav_fn = wav_fn + encoder = None + item = binarizer_cls.process_item(item_name, ph, txt, tg_fn, wav_fn, spk_id, encoder, binarization_args) + items.append(item) + sizes.append(item['len']) + return items, sizes diff --git a/NeuralSeq/tasks/tts/pe.py b/NeuralSeq/tasks/tts/pe.py new file mode 100644 index 0000000000000000000000000000000000000000..3880c80d0820c36e044c00bd38a07fd3cce73323 --- /dev/null +++ b/NeuralSeq/tasks/tts/pe.py @@ -0,0 +1,155 @@ +import matplotlib +matplotlib.use('Agg') + +import torch +import numpy as np +import os + +from tasks.base_task import BaseDataset +from tasks.tts.fs2 import FastSpeech2Task +from modules.fastspeech.pe import PitchExtractor +import utils +from utils.indexed_datasets import IndexedDataset +from utils.hparams import hparams +from utils.plot import f0_to_figure +from utils.pitch_utils import norm_interp_f0, denorm_f0 + + +class PeDataset(BaseDataset): + def __init__(self, prefix, shuffle=False): + super().__init__(shuffle) + self.data_dir = hparams['binary_data_dir'] + self.prefix = prefix + self.hparams = hparams + self.sizes = np.load(f'{self.data_dir}/{self.prefix}_lengths.npy') + self.indexed_ds = None + + # pitch stats + f0_stats_fn = f'{self.data_dir}/train_f0s_mean_std.npy' + if os.path.exists(f0_stats_fn): + hparams['f0_mean'], hparams['f0_std'] = self.f0_mean, self.f0_std = np.load(f0_stats_fn) + hparams['f0_mean'] = float(hparams['f0_mean']) + hparams['f0_std'] = float(hparams['f0_std']) + else: + hparams['f0_mean'], hparams['f0_std'] = self.f0_mean, self.f0_std = None, None + + if prefix == 'test': + if hparams['num_test_samples'] > 0: + self.avail_idxs = list(range(hparams['num_test_samples'])) + hparams['test_ids'] + self.sizes = [self.sizes[i] for i in self.avail_idxs] + + def _get_item(self, index): + if hasattr(self, 'avail_idxs') and self.avail_idxs is not None: + index = self.avail_idxs[index] + if self.indexed_ds is None: + self.indexed_ds = IndexedDataset(f'{self.data_dir}/{self.prefix}') + return self.indexed_ds[index] + + def __getitem__(self, index): + hparams = self.hparams + item = self._get_item(index) + max_frames = hparams['max_frames'] + spec = torch.Tensor(item['mel'])[:max_frames] + # mel2ph = torch.LongTensor(item['mel2ph'])[:max_frames] if 'mel2ph' in item else None + f0, uv = norm_interp_f0(item["f0"][:max_frames], hparams) + pitch = torch.LongTensor(item.get("pitch"))[:max_frames] + # print(item.keys(), item['mel'].shape, spec.shape) + sample = { + "id": index, + "item_name": item['item_name'], + "text": item['txt'], + "mel": spec, + "pitch": pitch, + "f0": f0, + "uv": uv, + # "mel2ph": mel2ph, + # "mel_nonpadding": spec.abs().sum(-1) > 0, + } + return sample + + def collater(self, samples): + if len(samples) == 0: + return {} + id = torch.LongTensor([s['id'] for s in samples]) + item_names = [s['item_name'] for s in samples] + text = [s['text'] for s in samples] + f0 = utils.collate_1d([s['f0'] for s in samples], 0.0) + pitch = 
utils.collate_1d([s['pitch'] for s in samples]) + uv = utils.collate_1d([s['uv'] for s in samples]) + mels = utils.collate_2d([s['mel'] for s in samples], 0.0) + mel_lengths = torch.LongTensor([s['mel'].shape[0] for s in samples]) + # mel2ph = utils.collate_1d([s['mel2ph'] for s in samples], 0.0) \ + # if samples[0]['mel2ph'] is not None else None + # mel_nonpaddings = utils.collate_1d([s['mel_nonpadding'].float() for s in samples], 0.0) + + batch = { + 'id': id, + 'item_name': item_names, + 'nsamples': len(samples), + 'text': text, + 'mels': mels, + 'mel_lengths': mel_lengths, + 'pitch': pitch, + # 'mel2ph': mel2ph, + # 'mel_nonpaddings': mel_nonpaddings, + 'f0': f0, + 'uv': uv, + } + return batch + + +class PitchExtractionTask(FastSpeech2Task): + def __init__(self): + super().__init__() + self.dataset_cls = PeDataset + + def build_tts_model(self): + self.model = PitchExtractor(conv_layers=hparams['pitch_extractor_conv_layers']) + + # def build_scheduler(self, optimizer): + # return torch.optim.lr_scheduler.StepLR(optimizer, hparams['decay_steps'], gamma=0.5) + def _training_step(self, sample, batch_idx, _): + loss_output = self.run_model(self.model, sample) + total_loss = sum([v for v in loss_output.values() if isinstance(v, torch.Tensor) and v.requires_grad]) + loss_output['batch_size'] = sample['mels'].size()[0] + return total_loss, loss_output + + def validation_step(self, sample, batch_idx): + outputs = {} + outputs['losses'] = {} + outputs['losses'], model_out = self.run_model(self.model, sample, return_output=True, infer=True) + outputs['total_loss'] = sum(outputs['losses'].values()) + outputs['nsamples'] = sample['nsamples'] + outputs = utils.tensors_to_scalars(outputs) + if batch_idx < hparams['num_valid_plots']: + self.plot_pitch(batch_idx, model_out, sample) + return outputs + + def run_model(self, model, sample, return_output=False, infer=False): + f0 = sample['f0'] + uv = sample['uv'] + output = model(sample['mels']) + losses = {} + self.add_pitch_loss(output, sample, losses) + if not return_output: + return losses + else: + return losses, output + + def plot_pitch(self, batch_idx, model_out, sample): + gt_f0 = denorm_f0(sample['f0'], sample['uv'], hparams) + self.logger.experiment.add_figure( + f'f0_{batch_idx}', + f0_to_figure(gt_f0[0], None, model_out['f0_denorm_pred'][0]), + self.global_step) + + def add_pitch_loss(self, output, sample, losses): + # mel2ph = sample['mel2ph'] # [B, T_s] + mel = sample['mels'] + f0 = sample['f0'] + uv = sample['uv'] + # nonpadding = (mel2ph != 0).float() if hparams['pitch_type'] == 'frame' \ + # else (sample['txt_tokens'] != 0).float() + nonpadding = (mel.abs().sum(-1) > 0).float() # sample['mel_nonpaddings'] + # print(nonpadding[0][-8:], nonpadding.shape) + self.add_f0_loss(output['pitch_pred'], f0, uv, losses, nonpadding=nonpadding) \ No newline at end of file diff --git a/NeuralSeq/tasks/tts/ps.py b/NeuralSeq/tasks/tts/ps.py new file mode 100644 index 0000000000000000000000000000000000000000..5222c102e6790605545e5adaff8edbc88e80e921 --- /dev/null +++ b/NeuralSeq/tasks/tts/ps.py @@ -0,0 +1,194 @@ +import os +import torch +import torch.nn.functional as F +from torch import nn + +from modules.portaspeech.portaspeech import PortaSpeech +from tasks.tts.fs2 import FastSpeech2Task +from utils.tts_utils import mel2token_to_dur +from utils.hparams import hparams +from utils.tts_utils import get_focus_rate, get_phone_coverage_rate, get_diagonal_focus_rate +from utils import num_params +import numpy as np + +from utils.plot import spec_to_figure 
+from data_gen.tts.data_gen_utils import build_token_encoder + + +class PortaSpeechTask(FastSpeech2Task): + def __init__(self): + super().__init__() + data_dir = hparams['binary_data_dir'] + self.word_encoder = build_token_encoder(f'{data_dir}/word_set.json') + + def build_tts_model(self): + ph_dict_size = len(self.token_encoder) + word_dict_size = len(self.word_encoder) + self.model = PortaSpeech(ph_dict_size, word_dict_size, hparams) + + def on_train_start(self): + super().on_train_start() + for n, m in self.model.named_children(): + num_params(m, model_name=n) + if hasattr(self.model, 'fvae'): + for n, m in self.model.fvae.named_children(): + num_params(m, model_name=f'fvae.{n}') + + def run_model(self, sample, infer=False, *args, **kwargs): + txt_tokens = sample['txt_tokens'] + word_tokens = sample['word_tokens'] + spk_embed = sample.get('spk_embed') + spk_id = sample.get('spk_ids') + if not infer: + output = self.model(txt_tokens, word_tokens, + ph2word=sample['ph2word'], + mel2word=sample['mel2word'], + mel2ph=sample['mel2ph'], + word_len=sample['word_lengths'].max(), + tgt_mels=sample['mels'], + pitch=sample.get('pitch'), + spk_embed=spk_embed, + spk_id=spk_id, + infer=False, + global_step=self.global_step) + losses = {} + losses['kl_v'] = output['kl'].detach() + losses_kl = output['kl'] + losses_kl = torch.clamp(losses_kl, min=hparams['kl_min']) + losses_kl = min(self.global_step / hparams['kl_start_steps'], 1) * losses_kl + losses_kl = losses_kl * hparams['lambda_kl'] + losses['kl'] = losses_kl + self.add_mel_loss(output['mel_out'], sample['mels'], losses) + if hparams['dur_level'] == 'word': + self.add_dur_loss( + output['dur'], sample['mel2word'], sample['word_lengths'], sample['txt_tokens'], losses) + self.get_attn_stats(output['attn'], sample, losses) + else: + super(PortaSpeechTask, self).add_dur_loss(output['dur'], sample['mel2ph'], sample['txt_tokens'], losses) + return losses, output + else: + use_gt_dur = kwargs.get('infer_use_gt_dur', hparams['use_gt_dur']) + output = self.model( + txt_tokens, word_tokens, + ph2word=sample['ph2word'], + word_len=sample['word_lengths'].max(), + pitch=sample.get('pitch'), + mel2ph=sample['mel2ph'] if use_gt_dur else None, + mel2word=sample['mel2word'] if use_gt_dur else None, + tgt_mels=sample['mels'], + infer=True, + spk_embed=spk_embed, + spk_id=spk_id, + ) + return output + + def add_dur_loss(self, dur_pred, mel2token, word_len, txt_tokens, losses=None): + T = word_len.max() + dur_gt = mel2token_to_dur(mel2token, T).float() + nonpadding = (torch.arange(T).to(dur_pred.device)[None, :] < word_len[:, None]).float() + dur_pred = dur_pred * nonpadding + dur_gt = dur_gt * nonpadding + wdur = F.l1_loss((dur_pred + 1).log(), (dur_gt + 1).log(), reduction='none') + wdur = (wdur * nonpadding).sum() / nonpadding.sum() + if hparams['lambda_word_dur'] > 0: + losses['wdur'] = wdur * hparams['lambda_word_dur'] + if hparams['lambda_sent_dur'] > 0: + sent_dur_p = dur_pred.sum(-1) + sent_dur_g = dur_gt.sum(-1) + sdur_loss = F.l1_loss(sent_dur_p, sent_dur_g, reduction='mean') + losses['sdur'] = sdur_loss.mean() * hparams['lambda_sent_dur'] + + def validation_step(self, sample, batch_idx): + return super().validation_step(sample, batch_idx) + + def save_valid_result(self, sample, batch_idx, model_out): + super(PortaSpeechTask, self).save_valid_result(sample, batch_idx, model_out) + if self.global_step > 0 and hparams['dur_level'] == 'word': + self.logger.add_figure(f'attn_{batch_idx}', spec_to_figure(model_out['attn'][0]), self.global_step) + + def 
get_attn_stats(self, attn, sample, logging_outputs, prefix=''): + # diagonal_focus_rate + txt_lengths = sample['txt_lengths'].float() + mel_lengths = sample['mel_lengths'].float() + src_padding_mask = sample['txt_tokens'].eq(0) + target_padding_mask = sample['mels'].abs().sum(-1).eq(0) + src_seg_mask = sample['txt_tokens'].eq(self.seg_idx) + attn_ks = txt_lengths.float() / mel_lengths.float() + + focus_rate = get_focus_rate(attn, src_padding_mask, target_padding_mask).mean().data + phone_coverage_rate = get_phone_coverage_rate( + attn, src_padding_mask, src_seg_mask, target_padding_mask).mean() + diagonal_focus_rate, diag_mask = get_diagonal_focus_rate( + attn, attn_ks, mel_lengths, src_padding_mask, target_padding_mask) + logging_outputs[f'{prefix}fr'] = focus_rate.mean().data + logging_outputs[f'{prefix}pcr'] = phone_coverage_rate.mean().data + logging_outputs[f'{prefix}dfr'] = diagonal_focus_rate.mean().data + + def get_plot_dur_info(self, sample, model_out): + if hparams['dur_level'] == 'word': + T_txt = sample['word_lengths'].max() + dur_gt = mel2token_to_dur(sample['mel2word'], T_txt)[0] + dur_pred = model_out['dur'] if 'dur' in model_out else dur_gt + txt = sample['ph_words'][0].split(" ") + else: + T_txt = sample['txt_tokens'].shape[1] + dur_gt = mel2token_to_dur(sample['mel2ph'], T_txt)[0] + dur_pred = model_out['dur'] if 'dur' in model_out else dur_gt + txt = self.token_encoder.decode(sample['txt_tokens'][0].cpu().numpy()) + txt = txt.split(" ") + return {'dur_gt': dur_gt, 'dur_pred': dur_pred, 'txt': txt} + + def build_optimizer(self, model): + self.optimizer = torch.optim.AdamW( + self.model.parameters(), + lr=hparams['lr'], + betas=(hparams['optimizer_adam_beta1'], hparams['optimizer_adam_beta2']), + weight_decay=hparams['weight_decay']) + return self.optimizer + + def build_scheduler(self, optimizer): + return FastSpeechTask.build_scheduler(self, optimizer) + + ############ + # infer + ############ + def test_start(self): + super().test_start() + if hparams.get('save_attn', False): + os.makedirs(f'{self.gen_dir}/attn', exist_ok=True) + self.model.store_inverse_all() + + def test_step(self, sample, batch_idx): + assert sample['txt_tokens'].shape[0] == 1, 'only support batch_size=1 in inference' + outputs = self.run_model(sample, infer=True) + text = sample['text'][0] + item_name = sample['item_name'][0] + tokens = sample['txt_tokens'][0].cpu().numpy() + mel_gt = sample['mels'][0].cpu().numpy() + mel_pred = outputs['mel_out'][0].cpu().numpy() + mel2ph = sample['mel2ph'][0].cpu().numpy() + mel2ph_pred = None + str_phs = self.token_encoder.decode(tokens, strip_padding=True) + base_fn = f'[{batch_idx:06d}][{item_name.replace("%", "_")}][%s]' + if text is not None: + base_fn += text.replace(":", "$3A")[:80] + base_fn = base_fn.replace(' ', '_') + gen_dir = self.gen_dir + wav_pred = self.vocoder.spec2wav(mel_pred) + self.saving_result_pool.add_job(self.save_result, args=[ + wav_pred, mel_pred, base_fn % 'P', gen_dir, str_phs, mel2ph_pred]) + if hparams['save_gt']: + wav_gt = self.vocoder.spec2wav(mel_gt) + self.saving_result_pool.add_job(self.save_result, args=[ + wav_gt, mel_gt, base_fn % 'G', gen_dir, str_phs, mel2ph]) + if hparams.get('save_attn', False): + attn = outputs['attn'][0].cpu().numpy() + np.save(f'{gen_dir}/attn/{item_name}.npy', attn) + print(f"Pred_shape: {mel_pred.shape}, gt_shape: {mel_gt.shape}") + return { + 'item_name': item_name, + 'text': text, + 'ph_tokens': self.token_encoder.decode(tokens.tolist()), + 'wav_fn_pred': base_fn % 'P', + 'wav_fn_gt': base_fn % 
'G', + } diff --git a/NeuralSeq/tasks/tts/ps_adv.py b/NeuralSeq/tasks/tts/ps_adv.py new file mode 100644 index 0000000000000000000000000000000000000000..fbc1e5133ddf26f2dfac598028b8e3db01ec638e --- /dev/null +++ b/NeuralSeq/tasks/tts/ps_adv.py @@ -0,0 +1,372 @@ +import os +import torch +import torch.nn.functional as F +import torch.nn as nn +import numpy as np + +from modules.portaspeech.portaspeech import PortaSpeech +from modules.syntaspeech.multi_window_disc import Discriminator +from tasks.tts.fs2 import FastSpeech2Task +from utils.hparams import hparams +from utils.tts_utils import get_focus_rate, get_phone_coverage_rate, get_diagonal_focus_rate, mel2token_to_dur +from utils import num_params, tensors_to_scalars +from utils.pitch_utils import denorm_f0, norm_f0 +from data_gen.tts.data_gen_utils import get_pitch +from utils.dtw import dtw as DTW + +from utils.plot import spec_to_figure +from utils.text.text_encoder import build_token_encoder + + +class PortaSpeechAdvTask(FastSpeech2Task): + def __init__(self): + super().__init__() + data_dir = hparams['binary_data_dir'] + self.word_encoder = build_token_encoder(f'{data_dir}/word_set.json') + self.build_disc_model() + self.mse_loss_fn = torch.nn.MSELoss() + + def build_tts_model(self): + ph_dict_size = len(self.token_encoder) + word_dict_size = len(self.word_encoder) + self.model = PortaSpeech(ph_dict_size, word_dict_size, hparams) + + self.gen_params = [p for p in self.model.parameters() if p.requires_grad] + self.dp_params = [p for k, p in self.model.named_parameters() if (('dur_predictor' in k) and p.requires_grad)] + self.gen_params_except_dp = [p for k, p in self.model.named_parameters() if (('dur_predictor' not in k) and p.requires_grad)] + self.bert_params = [p for k, p in self.model.named_parameters() if (('bert' in k) and p.requires_grad)] + self.gen_params_except_bert_and_dp = [p for k, p in self.model.named_parameters() if ('dur_predictor' not in k) and ('bert' not in k) and p.requires_grad ] + + self.use_bert = True if len(self.bert_params) > 0 else False + + def build_disc_model(self): + disc_win_num = hparams['disc_win_num'] + h = hparams['mel_disc_hidden_size'] + self.mel_disc = Discriminator( + time_lengths=[32, 64, 128][:disc_win_num], + freq_length=80, hidden_size=h, kernel=(3, 3) + ) + self.disc_params = list(self.mel_disc.parameters()) + + def on_train_start(self): + super().on_train_start() + for n, m in self.model.named_children(): + num_params(m, model_name=n) + if hasattr(self.model, 'fvae'): + for n, m in self.model.fvae.named_children(): + num_params(m, model_name=f'fvae.{n}') + + def _training_step(self, sample, batch_idx, optimizer_idx): + loss_output = {} + loss_weights = {} + disc_start = self.global_step >= hparams["disc_start_steps"] and hparams['lambda_mel_adv'] > 0 + if optimizer_idx == 0: + ####################### + # Generator # + ####################### + loss_output, model_out = self.run_model(sample, infer=False) + self.model_out_gt = self.model_out = \ + {k: v.detach() for k, v in model_out.items() if isinstance(v, torch.Tensor)} + if disc_start: + mel_p = model_out['mel_out'] + if hasattr(self.model, 'out2mel'): + mel_p = self.model.out2mel(mel_p) + o_ = self.mel_disc(mel_p) + p_, pc_ = o_['y'], o_['y_c'] + if p_ is not None: + loss_output['a'] = self.mse_loss_fn(p_, p_.new_ones(p_.size())) + loss_weights['a'] = hparams['lambda_mel_adv'] + if pc_ is not None: + loss_output['ac'] = self.mse_loss_fn(pc_, pc_.new_ones(pc_.size())) + loss_weights['ac'] = hparams['lambda_mel_adv'] + else: + 
####################### + # Discriminator # + ####################### + if disc_start and self.global_step % hparams['disc_interval'] == 0: + model_out = self.model_out_gt + mel_g = sample['mels'] + mel_p = model_out['mel_out'] + o = self.mel_disc(mel_g) + p, pc = o['y'], o['y_c'] + o_ = self.mel_disc(mel_p) + p_, pc_ = o_['y'], o_['y_c'] + if p_ is not None: + loss_output["r"] = self.mse_loss_fn(p, p.new_ones(p.size())) + loss_output["f"] = self.mse_loss_fn(p_, p_.new_zeros(p_.size())) + if pc_ is not None: + loss_output["rc"] = self.mse_loss_fn(pc, pc.new_ones(pc.size())) + loss_output["fc"] = self.mse_loss_fn(pc_, pc_.new_zeros(pc_.size())) + total_loss = sum([loss_weights.get(k, 1) * v for k, v in loss_output.items() if isinstance(v, torch.Tensor) and v.requires_grad]) + loss_output['batch_size'] = sample['txt_tokens'].size()[0] + return total_loss, loss_output + + def run_model(self, sample, infer=False, *args, **kwargs): + txt_tokens = sample['txt_tokens'] + word_tokens = sample['word_tokens'] + spk_embed = sample.get('spk_embed') + spk_id = sample.get('spk_ids') + if not infer: + output = self.model(txt_tokens, word_tokens, + ph2word=sample['ph2word'], + mel2word=sample['mel2word'], + mel2ph=sample['mel2ph'], + word_len=sample['word_lengths'].max(), + tgt_mels=sample['mels'], + pitch=sample.get('pitch'), + spk_embed=spk_embed, + spk_id=spk_id, + infer=False, + global_step=self.global_step, + graph_lst=sample['graph_lst'], + etypes_lst=sample['etypes_lst'], + bert_feats=sample.get("bert_feats"), + cl_feats=sample.get("cl_feats") + ) + losses = {} + losses['kl_v'] = output['kl'].detach() + losses_kl = output['kl'] + losses_kl = torch.clamp(losses_kl, min=hparams['kl_min']) + losses_kl = min(self.global_step / hparams['kl_start_steps'], 1) * losses_kl + losses_kl = losses_kl * hparams['lambda_kl'] + losses['kl'] = losses_kl + + self.add_mel_loss(output['mel_out'], sample['mels'], losses) + if hparams['dur_level'] == 'word': + self.add_dur_loss( + output['dur'], sample['mel2word'], sample['word_lengths'], sample['txt_tokens'], losses) + self.get_attn_stats(output['attn'], sample, losses) + else: + super(PortaSpeechAdvTask, self).add_dur_loss(output['dur'], sample['mel2ph'], sample['txt_tokens'], losses) + return losses, output + else: + use_gt_dur = kwargs.get('infer_use_gt_dur', hparams['use_gt_dur']) + output = self.model( + txt_tokens, word_tokens, + ph2word=sample['ph2word'], + word_len=sample['word_lengths'].max(), + pitch=sample.get('pitch'), + mel2ph=sample['mel2ph'] if use_gt_dur else None, + mel2word=sample['mel2word'] if use_gt_dur else None, + tgt_mels=sample['mels'], + infer=True, + spk_embed=spk_embed, + spk_id=spk_id, + graph_lst=sample['graph_lst'], + etypes_lst=sample['etypes_lst'], + bert_feats=sample.get("bert_feats"), + cl_feats=sample.get("cl_feats") + ) + return output + + def add_dur_loss(self, dur_pred, mel2token, word_len, txt_tokens, losses=None): + T = word_len.max() + dur_gt = mel2token_to_dur(mel2token, T).float() + nonpadding = (torch.arange(T).to(dur_pred.device)[None, :] < word_len[:, None]).float() + dur_pred = dur_pred * nonpadding + dur_gt = dur_gt * nonpadding + wdur = F.l1_loss((dur_pred + 1).log(), (dur_gt + 1).log(), reduction='none') + wdur = (wdur * nonpadding).sum() / nonpadding.sum() + + if hparams['lambda_word_dur'] > 0: + losses['wdur'] = wdur * hparams['lambda_word_dur'] + if hparams['lambda_sent_dur'] > 0: + sent_dur_p = dur_pred.sum(-1) + sent_dur_g = dur_gt.sum(-1) + sdur_loss = F.l1_loss(sent_dur_p, sent_dur_g, reduction='mean') + 
losses['sdur'] = sdur_loss.mean() * hparams['lambda_sent_dur'] + + with torch.no_grad(): + # calculate word-level abs_dur_error in micro-second + abs_word_dur_error = F.l1_loss(dur_pred , dur_gt, reduction='none') + abs_word_dur_error = (abs_word_dur_error * nonpadding).sum() / nonpadding.sum() + abs_word_dur_error = abs_word_dur_error * hparams['hop_size'] / hparams['audio_sample_rate'] * 1000 + losses['abs_word_dur_error'] = abs_word_dur_error + # calculate word-level abs_dur_error in second + sent_dur_p = dur_pred.sum(-1) + sent_dur_g = dur_gt.sum(-1) + abs_sent_dur_error = F.l1_loss(sent_dur_p, sent_dur_g, reduction='mean').mean() + abs_sent_dur_error = abs_sent_dur_error * hparams['hop_size'] / hparams['audio_sample_rate'] + losses['abs_sent_dur_error'] = abs_sent_dur_error + + def validation_step(self, sample, batch_idx): + outputs = {} + outputs['losses'] = {} + outputs['losses'], model_out = self.run_model(sample) + outputs['total_loss'] = sum(outputs['losses'].values()) + outputs['nsamples'] = sample['nsamples'] + outputs = tensors_to_scalars(outputs) + if self.global_step % hparams['valid_infer_interval'] == 0 \ + and batch_idx < hparams['num_valid_plots']: + valid_results = self.save_valid_result(sample, batch_idx, model_out) + wav_gt = valid_results['wav_gt'] + mel_gt = valid_results['mel_gt'] + wav_pred = valid_results['wav_pred'] + mel_pred = valid_results['mel_pred'] + f0_pred_, _ = get_pitch(wav_pred, mel_pred, hparams) + f0_gt_, _ = get_pitch(wav_gt, mel_gt, hparams) + manhattan_distance = lambda x, y: np.abs(x - y) + dist, cost, acc, path = DTW(f0_pred_, f0_gt_, manhattan_distance) + outputs['losses']['f0_dtw'] = dist / len(f0_gt_) + return outputs + + def save_valid_result(self, sample, batch_idx, model_out): + sr = hparams['audio_sample_rate'] + f0_gt = None + mel_out = model_out['mel_out'] + if sample.get('f0') is not None: + f0_gt = denorm_f0(sample['f0'][0].cpu(), sample['uv'][0].cpu()) + self.plot_mel(batch_idx, sample['mels'], mel_out, f0s=f0_gt) + + # if self.global_step > 0: + wav_pred = self.vocoder.spec2wav(mel_out[0].cpu(), f0=f0_gt) + self.logger.add_audio(f'wav_val_{batch_idx}', wav_pred, self.global_step, sr) + # with gt duration + model_out = self.run_model(sample, infer=True, infer_use_gt_dur=True) + dur_info = self.get_plot_dur_info(sample, model_out) + del dur_info['dur_pred'] + wav_pred = self.vocoder.spec2wav(model_out['mel_out'][0].cpu(), f0=f0_gt) + self.logger.add_audio(f'wav_gdur_{batch_idx}', wav_pred, self.global_step, sr) + self.plot_mel(batch_idx, sample['mels'], model_out['mel_out'][0], f'mel_gdur_{batch_idx}', + dur_info=dur_info, f0s=f0_gt) + + # with pred duration + if not hparams['use_gt_dur']: + model_out = self.run_model(sample, infer=True, infer_use_gt_dur=False) + dur_info = self.get_plot_dur_info(sample, model_out) + self.plot_mel(batch_idx, sample['mels'], model_out['mel_out'][0], f'mel_pdur_{batch_idx}', + dur_info=dur_info, f0s=f0_gt) + wav_pred = self.vocoder.spec2wav(model_out['mel_out'][0].cpu(), f0=f0_gt) + self.logger.add_audio(f'wav_pdur_{batch_idx}', wav_pred, self.global_step, sr) + # gt wav + mel_gt = sample['mels'][0].cpu() + wav_gt = self.vocoder.spec2wav(mel_gt, f0=f0_gt) + if self.global_step <= hparams['valid_infer_interval']: + self.logger.add_audio(f'wav_gt_{batch_idx}', wav_gt, self.global_step, sr) + + # add attn plot + if self.global_step > 0 and hparams['dur_level'] == 'word': + self.logger.add_figure(f'attn_{batch_idx}', spec_to_figure(model_out['attn'][0]), self.global_step) + + return {'wav_gt': wav_gt, 
'wav_pred': wav_pred, 'mel_gt': mel_gt, 'mel_pred': model_out['mel_out'][0].cpu()} + + def get_attn_stats(self, attn, sample, logging_outputs, prefix=''): + # diagonal_focus_rate + txt_lengths = sample['txt_lengths'].float() + mel_lengths = sample['mel_lengths'].float() + src_padding_mask = sample['txt_tokens'].eq(0) + target_padding_mask = sample['mels'].abs().sum(-1).eq(0) + src_seg_mask = sample['txt_tokens'].eq(self.seg_idx) + attn_ks = txt_lengths.float() / mel_lengths.float() + + focus_rate = get_focus_rate(attn, src_padding_mask, target_padding_mask).mean().data + phone_coverage_rate = get_phone_coverage_rate( + attn, src_padding_mask, src_seg_mask, target_padding_mask).mean() + diagonal_focus_rate, diag_mask = get_diagonal_focus_rate( + attn, attn_ks, mel_lengths, src_padding_mask, target_padding_mask) + logging_outputs[f'{prefix}fr'] = focus_rate.mean().data + logging_outputs[f'{prefix}pcr'] = phone_coverage_rate.mean().data + logging_outputs[f'{prefix}dfr'] = diagonal_focus_rate.mean().data + + def get_plot_dur_info(self, sample, model_out): + if hparams['dur_level'] == 'word': + T_txt = sample['word_lengths'].max() + dur_gt = mel2token_to_dur(sample['mel2word'], T_txt)[0] + dur_pred = model_out['dur'] if 'dur' in model_out else dur_gt + txt = sample['ph_words'][0].split(" ") + else: + T_txt = sample['txt_tokens'].shape[1] + dur_gt = mel2token_to_dur(sample['mel2ph'], T_txt)[0] + dur_pred = model_out['dur'] if 'dur' in model_out else dur_gt + txt = self.token_encoder.decode(sample['txt_tokens'][0].cpu().numpy()) + txt = txt.split(" ") + return {'dur_gt': dur_gt, 'dur_pred': dur_pred, 'txt': txt} + + def build_optimizer(self, model): + + optimizer_gen = torch.optim.AdamW( + self.gen_params, + lr=hparams['lr'], + betas=(hparams['optimizer_adam_beta1'], hparams['optimizer_adam_beta2']), + weight_decay=hparams['weight_decay']) + + optimizer_disc = torch.optim.AdamW( + self.disc_params, + lr=hparams['disc_lr'], + betas=(hparams['optimizer_adam_beta1'], hparams['optimizer_adam_beta2']), + **hparams["discriminator_optimizer_params"]) if len(self.disc_params) > 0 else None + + return [optimizer_gen, optimizer_disc] + + def build_scheduler(self, optimizer): + return [ + FastSpeechTask.build_scheduler(self, optimizer[0]), # Generator Scheduler + torch.optim.lr_scheduler.StepLR(optimizer=optimizer[1], # Discriminator Scheduler + **hparams["discriminator_scheduler_params"]), + ] + + def on_before_optimization(self, opt_idx): + if opt_idx == 0: + nn.utils.clip_grad_norm_(self.dp_params, hparams['clip_grad_norm']) + if self.use_bert: + nn.utils.clip_grad_norm_(self.bert_params, hparams['clip_grad_norm']) + nn.utils.clip_grad_norm_(self.gen_params_except_bert_and_dp, hparams['clip_grad_norm']) + else: + nn.utils.clip_grad_norm_(self.gen_params_except_dp, hparams['clip_grad_norm']) + else: + nn.utils.clip_grad_norm_(self.disc_params, hparams["clip_grad_norm"]) + + def on_after_optimization(self, epoch, batch_idx, optimizer, optimizer_idx): + if self.scheduler is not None: + self.scheduler[0].step(self.global_step // hparams['accumulate_grad_batches']) + self.scheduler[1].step(self.global_step // hparams['accumulate_grad_batches']) + + ############ + # infer + ############ + def test_start(self): + super().test_start() + if hparams.get('save_attn', False): + os.makedirs(f'{self.gen_dir}/attn', exist_ok=True) + self.model.store_inverse_all() + + def test_step(self, sample, batch_idx): + assert sample['txt_tokens'].shape[0] == 1, 'only support batch_size=1 in inference' + outputs = 
self.run_model(sample, infer=True) + text = sample['text'][0] + item_name = sample['item_name'][0] + tokens = sample['txt_tokens'][0].cpu().numpy() + mel_gt = sample['mels'][0].cpu().numpy() + mel_pred = outputs['mel_out'][0].cpu().numpy() + mel2ph = sample['mel2ph'][0].cpu().numpy() + mel2ph_pred = None + str_phs = self.token_encoder.decode(tokens, strip_padding=True) + base_fn = f'[{batch_idx:06d}][{item_name.replace("%", "_")}][%s]' + if text is not None: + base_fn += text.replace(":", "$3A")[:80] + base_fn = base_fn.replace(' ', '_') + gen_dir = self.gen_dir + wav_pred = self.vocoder.spec2wav(mel_pred) + self.saving_result_pool.add_job(self.save_result, args=[ + wav_pred, mel_pred, base_fn % 'P', gen_dir, str_phs, mel2ph_pred]) + if hparams['save_gt']: + wav_gt = self.vocoder.spec2wav(mel_gt) + self.saving_result_pool.add_job(self.save_result, args=[ + wav_gt, mel_gt, base_fn % 'G', gen_dir, str_phs, mel2ph]) + if hparams.get('save_attn', False): + attn = outputs['attn'][0].cpu().numpy() + np.save(f'{gen_dir}/attn/{item_name}.npy', attn) + # save f0 for pitch dtw + f0_pred_, _ = get_pitch(wav_pred, mel_pred, hparams) + f0_gt_, _ = get_pitch(wav_gt, mel_gt, hparams) + np.save(f'{gen_dir}/f0/{item_name}.npy', f0_pred_) + np.save(f'{gen_dir}/f0/{item_name}_gt.npy', f0_gt_) + + print(f"Pred_shape: {mel_pred.shape}, gt_shape: {mel_gt.shape}") + return { + 'item_name': item_name, + 'text': text, + 'ph_tokens': self.token_encoder.decode(tokens.tolist()), + 'wav_fn_pred': base_fn % 'P', + 'wav_fn_gt': base_fn % 'G', + } diff --git a/NeuralSeq/tasks/tts/ps_flow.py b/NeuralSeq/tasks/tts/ps_flow.py new file mode 100644 index 0000000000000000000000000000000000000000..311c1cbf27dd90ac0bd929e2a5d17c928e2dce94 --- /dev/null +++ b/NeuralSeq/tasks/tts/ps_flow.py @@ -0,0 +1,135 @@ +import torch +from modules.portaspeech.portaspeech_flow import PortaSpeechFlow +from tasks.tts.fs2 import FastSpeech2Task +from tasks.tts.ps import PortaSpeechTask +from utils.pitch_utils import denorm_f0 +from utils.hparams import hparams + + +class PortaSpeechFlowTask(PortaSpeechTask): + def __init__(self): + super().__init__() + self.training_post_glow = False + + def build_tts_model(self): + ph_dict_size = len(self.token_encoder) + word_dict_size = len(self.word_encoder) + self.model = PortaSpeechFlow(ph_dict_size, word_dict_size, hparams) + + def _training_step(self, sample, batch_idx, opt_idx): + self.training_post_glow = self.global_step >= hparams['post_glow_training_start'] \ + and hparams['use_post_flow'] + if hparams['two_stage'] and \ + ((opt_idx == 0 and self.training_post_glow) or (opt_idx == 1 and not self.training_post_glow)): + return None + loss_output, _ = self.run_model(sample) + total_loss = sum([v for v in loss_output.values() if isinstance(v, torch.Tensor) and v.requires_grad]) + loss_output['batch_size'] = sample['txt_tokens'].size()[0] + if 'postflow' in loss_output and loss_output['postflow'] is None: + return None + return total_loss, loss_output + + def run_model(self, sample, infer=False, *args, **kwargs): + if not infer: + training_post_glow = self.training_post_glow + spk_embed = sample.get('spk_embed') + spk_id = sample.get('spk_ids') + output = self.model(sample['txt_tokens'], + sample['word_tokens'], + ph2word=sample['ph2word'], + mel2word=sample['mel2word'], + mel2ph=sample['mel2ph'], + word_len=sample['word_lengths'].max(), + tgt_mels=sample['mels'], + pitch=sample.get('pitch'), + spk_embed=spk_embed, + spk_id=spk_id, + infer=False, + forward_post_glow=training_post_glow, + 
two_stage=hparams['two_stage'], + global_step=self.global_step, + bert_feats=sample.get('bert_feats')) + losses = {} + self.add_mel_loss(output['mel_out'], sample['mels'], losses) + if (training_post_glow or not hparams['two_stage']) and hparams['use_post_flow']: + losses['postflow'] = output['postflow'] + losses['l1'] = losses['l1'].detach() + losses['ssim'] = losses['ssim'].detach() + if not training_post_glow or not hparams['two_stage'] or not self.training: + losses['kl'] = output['kl'] + if self.global_step < hparams['kl_start_steps']: + losses['kl'] = losses['kl'].detach() + else: + losses['kl'] = torch.clamp(losses['kl'], min=hparams['kl_min']) + losses['kl'] = losses['kl'] * hparams['lambda_kl'] + if hparams['dur_level'] == 'word': + self.add_dur_loss( + output['dur'], sample['mel2word'], sample['word_lengths'], sample['txt_tokens'], losses) + self.get_attn_stats(output['attn'], sample, losses) + else: + super().add_dur_loss(output['dur'], sample['mel2ph'], sample['txt_tokens'], losses) + return losses, output + else: + use_gt_dur = kwargs.get('infer_use_gt_dur', hparams['use_gt_dur']) + forward_post_glow = self.global_step >= hparams['post_glow_training_start'] + 1000 \ + and hparams['use_post_flow'] + spk_embed = sample.get('spk_embed') + spk_id = sample.get('spk_ids') + output = self.model( + sample['txt_tokens'], + sample['word_tokens'], + ph2word=sample['ph2word'], + word_len=sample['word_lengths'].max(), + pitch=sample.get('pitch'), + mel2ph=sample['mel2ph'] if use_gt_dur else None, + mel2word=sample['mel2word'] if hparams['profile_infer'] or hparams['use_gt_dur'] else None, + infer=True, + forward_post_glow=forward_post_glow, + spk_embed=spk_embed, + spk_id=spk_id, + two_stage=hparams['two_stage'], + bert_feats=sample.get('bert_feats')) + return output + + def validation_step(self, sample, batch_idx): + self.training_post_glow = self.global_step >= hparams['post_glow_training_start'] \ + and hparams['use_post_flow'] + return super().validation_step(sample, batch_idx) + + def save_valid_result(self, sample, batch_idx, model_out): + super(PortaSpeechFlowTask, self).save_valid_result(sample, batch_idx, model_out) + sr = hparams['audio_sample_rate'] + f0_gt = None + if sample.get('f0') is not None: + f0_gt = denorm_f0(sample['f0'][0].cpu(), sample['uv'][0].cpu()) + if self.global_step > 0: + # save FVAE result + if hparams['use_post_flow']: + wav_pred = self.vocoder.spec2wav(model_out['mel_out_fvae'][0].cpu(), f0=f0_gt) + self.logger.add_audio(f'wav_fvae_{batch_idx}', wav_pred, self.global_step, sr) + self.plot_mel(batch_idx, sample['mels'], model_out['mel_out_fvae'][0], + f'mel_fvae_{batch_idx}', f0s=f0_gt) + + def build_optimizer(self, model): + if hparams['two_stage'] and hparams['use_post_flow']: + self.optimizer = torch.optim.AdamW( + [p for name, p in self.model.named_parameters() if 'post_flow' not in name], + lr=hparams['lr'], + betas=(hparams['optimizer_adam_beta1'], hparams['optimizer_adam_beta2']), + weight_decay=hparams['weight_decay']) + self.post_flow_optimizer = torch.optim.AdamW( + self.model.post_flow.parameters(), + lr=hparams['post_flow_lr'], + betas=(hparams['optimizer_adam_beta1'], hparams['optimizer_adam_beta2']), + weight_decay=hparams['weight_decay']) + return [self.optimizer, self.post_flow_optimizer] + else: + self.optimizer = torch.optim.AdamW( + self.model.parameters(), + lr=hparams['lr'], + betas=(hparams['optimizer_adam_beta1'], hparams['optimizer_adam_beta2']), + weight_decay=hparams['weight_decay']) + return [self.optimizer] + + def 
build_scheduler(self, optimizer): + return FastSpeech2Task.build_scheduler(self, optimizer[0]) \ No newline at end of file diff --git a/NeuralSeq/tasks/tts/synta.py b/NeuralSeq/tasks/tts/synta.py new file mode 100644 index 0000000000000000000000000000000000000000..62cf865bc7d17c62b2c0c71f21d5b0ab596ba312 --- /dev/null +++ b/NeuralSeq/tasks/tts/synta.py @@ -0,0 +1,25 @@ +import os +import torch +import torch.nn.functional as F +from torch import nn + +from modules.tts.syntaspeech.syntaspeech import SyntaSpeech +from tasks.tts.ps_adv import PortaSpeechAdvTask +from utils.hparams import hparams + + +class SyntaSpeechTask(PortaSpeechAdvTask): + def build_tts_model(self): + ph_dict_size = len(self.token_encoder) + word_dict_size = len(self.word_encoder) + self.model = SyntaSpeech(ph_dict_size, word_dict_size, hparams) + + self.gen_params = [p for p in self.model.parameters() if p.requires_grad] + self.dp_params = [p for k, p in self.model.named_parameters() if (('dur_predictor' in k) and p.requires_grad)] + self.gen_params_except_dp = [p for k, p in self.model.named_parameters() if (('dur_predictor' not in k) and p.requires_grad)] + self.bert_params = [p for k, p in self.model.named_parameters() if (('bert' in k) and p.requires_grad)] + self.gen_params_except_bert_and_dp = [p for k, p in self.model.named_parameters() if ('dur_predictor' not in k) and ('bert' not in k) and p.requires_grad ] + + self.use_bert = True if len(self.bert_params) > 0 else False + + \ No newline at end of file diff --git a/NeuralSeq/tasks/tts/tts.py b/NeuralSeq/tasks/tts/tts.py new file mode 100644 index 0000000000000000000000000000000000000000..f803c1e738137cb1eca19a1943196abd2884c0a5 --- /dev/null +++ b/NeuralSeq/tasks/tts/tts.py @@ -0,0 +1,131 @@ +from multiprocessing.pool import Pool + +import matplotlib + +from utils.pl_utils import data_loader +from utils.training_utils import RSQRTSchedule +from vocoders.base_vocoder import get_vocoder_cls, BaseVocoder +from modules.fastspeech.pe import PitchExtractor + +matplotlib.use('Agg') +import os +import numpy as np +from tqdm import tqdm +import torch.distributed as dist + +from tasks.base_task import BaseTask +from utils.hparams import hparams +from utils.text_encoder import TokenTextEncoder +import json + +import torch +import torch.optim +import torch.utils.data +import utils + + + +class TtsTask(BaseTask): + def __init__(self, *args, **kwargs): + self.vocoder = None + self.phone_encoder = self.build_phone_encoder(hparams['binary_data_dir']) + self.padding_idx = self.phone_encoder.pad() + self.eos_idx = self.phone_encoder.eos() + self.seg_idx = self.phone_encoder.seg() + self.saving_result_pool = None + self.saving_results_futures = None + self.stats = {} + super().__init__(*args, **kwargs) + + def build_scheduler(self, optimizer): + return RSQRTSchedule(optimizer) + + def build_optimizer(self, model): + self.optimizer = optimizer = torch.optim.AdamW( + model.parameters(), + lr=hparams['lr']) + return optimizer + + def build_dataloader(self, dataset, shuffle, max_tokens=None, max_sentences=None, + required_batch_size_multiple=-1, endless=False, batch_by_size=True): + devices_cnt = torch.cuda.device_count() + if devices_cnt == 0: + devices_cnt = 1 + if required_batch_size_multiple == -1: + required_batch_size_multiple = devices_cnt + + def shuffle_batches(batches): + np.random.shuffle(batches) + return batches + + if max_tokens is not None: + max_tokens *= devices_cnt + if max_sentences is not None: + max_sentences *= devices_cnt + indices = dataset.ordered_indices() + 
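+ # batch_by_size buckets the length-sorted indices into batches capped by max_tokens (total token/frame budget) and max_sentences; the else branch below instead chunks indices into fixed groups of max_sentences.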
if batch_by_size: + batch_sampler = utils.batch_by_size( + indices, dataset.num_tokens, max_tokens=max_tokens, max_sentences=max_sentences, + required_batch_size_multiple=required_batch_size_multiple, + ) + else: + batch_sampler = [] + for i in range(0, len(indices), max_sentences): + batch_sampler.append(indices[i:i + max_sentences]) + + if shuffle: + batches = shuffle_batches(list(batch_sampler)) + if endless: + batches = [b for _ in range(1000) for b in shuffle_batches(list(batch_sampler))] + else: + batches = batch_sampler + if endless: + batches = [b for _ in range(1000) for b in batches] + num_workers = dataset.num_workers + if self.trainer.use_ddp: + num_replicas = dist.get_world_size() + rank = dist.get_rank() + batches = [x[rank::num_replicas] for x in batches if len(x) % num_replicas == 0] + return torch.utils.data.DataLoader(dataset, + collate_fn=dataset.collater, + batch_sampler=batches, + num_workers=num_workers, + pin_memory=False) + + def build_phone_encoder(self, data_dir): + phone_list_file = os.path.join(data_dir, 'phone_set.json') + + phone_list = json.load(open(phone_list_file)) + return TokenTextEncoder(None, vocab_list=phone_list, replace_oov=',') + + def build_optimizer(self, model): + self.optimizer = optimizer = torch.optim.AdamW( + model.parameters(), + lr=hparams['lr']) + return optimizer + + def test_start(self): + self.saving_result_pool = Pool(8) + self.saving_results_futures = [] + self.vocoder: BaseVocoder = get_vocoder_cls(hparams)() + if hparams.get('pe_enable') is not None and hparams['pe_enable']: + self.pe = PitchExtractor().cuda() + utils.load_ckpt(self.pe, hparams['pe_ckpt'], 'model', strict=True) + self.pe.eval() + def test_end(self, outputs): + self.saving_result_pool.close() + [f.get() for f in tqdm(self.saving_results_futures)] + self.saving_result_pool.join() + return {} + + ########## + # utils + ########## + def weights_nonzero_speech(self, target): + # target : B x T x mel + # Assign weight 1.0 to all labels except for padding (id=0). 
+ dim = target.size(-1) + return target.abs().sum(-1, keepdim=True).ne(0).float().repeat(1, 1, dim) + +if __name__ == '__main__': + TtsTask.start() diff --git a/NeuralSeq/tasks/tts/tts_base.py b/NeuralSeq/tasks/tts/tts_base.py new file mode 100644 index 0000000000000000000000000000000000000000..509740b54dbf23db6bafebd6bc46089ee83cf499 --- /dev/null +++ b/NeuralSeq/tasks/tts/tts_base.py @@ -0,0 +1,305 @@ +import filecmp + +import matplotlib + +from utils.plot import spec_to_figure + +matplotlib.use('Agg') + +from data_gen.tts.data_gen_utils import get_pitch +from modules.fastspeech.tts_modules import mel2ph_to_dur +from tasks.tts.dataset_utils import BaseTTSDataset +from utils.tts_utils import sequence_mask +from multiprocessing.pool import Pool +from tasks.base_task import data_loader, BaseConcatDataset +from utils.common_schedulers import RSQRTSchedule, NoneSchedule +from vocoders.base_vocoder import get_vocoder_cls, BaseVocoder +import os +import numpy as np +from tqdm import tqdm +import torch.distributed as dist +from tasks.base_task import BaseTask +from utils.hparams import hparams +from utils.text_encoder import TokenTextEncoder +import json +import matplotlib.pyplot as plt +import torch +import torch.optim +import torch.utils.data +import utils +from utils import audio +import pandas as pd + + +class TTSBaseTask(BaseTask): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.dataset_cls = BaseTTSDataset + self.max_tokens = hparams['max_tokens'] + self.max_sentences = hparams['max_sentences'] + self.max_valid_tokens = hparams['max_valid_tokens'] + if self.max_valid_tokens == -1: + hparams['max_valid_tokens'] = self.max_valid_tokens = self.max_tokens + self.max_valid_sentences = hparams['max_valid_sentences'] + if self.max_valid_sentences == -1: + hparams['max_valid_sentences'] = self.max_valid_sentences = self.max_sentences + self.vocoder = None + self.phone_encoder = self.build_phone_encoder(hparams['binary_data_dir']) + self.padding_idx = self.phone_encoder.pad() + self.eos_idx = self.phone_encoder.eos() + self.seg_idx = self.phone_encoder.seg() + self.saving_result_pool = None + self.saving_results_futures = None + self.stats = {} + + @data_loader + def train_dataloader(self): + if hparams['train_sets'] != '': + train_sets = hparams['train_sets'].split("|") + # check if all train_sets have the same spk map and dictionary + binary_data_dir = hparams['binary_data_dir'] + file_to_cmp = ['phone_set.json'] + if os.path.exists(f'{binary_data_dir}/word_set.json'): + file_to_cmp.append('word_set.json') + if hparams['use_spk_id']: + file_to_cmp.append('spk_map.json') + for f in file_to_cmp: + for ds_name in train_sets: + base_file = os.path.join(binary_data_dir, f) + ds_file = os.path.join(ds_name, f) + assert filecmp.cmp(base_file, ds_file), \ + f'{f} in {ds_name} is not same with that in {binary_data_dir}.' 
+ train_dataset = BaseConcatDataset([ + self.dataset_cls(prefix='train', shuffle=True, data_dir=ds_name) for ds_name in train_sets]) + else: + train_dataset = self.dataset_cls(prefix=hparams['train_set_name'], shuffle=True) + return self.build_dataloader(train_dataset, True, self.max_tokens, self.max_sentences, + endless=hparams['endless_ds']) + + @data_loader + def val_dataloader(self): + valid_dataset = self.dataset_cls(prefix=hparams['valid_set_name'], shuffle=False) + return self.build_dataloader(valid_dataset, False, self.max_valid_tokens, self.max_valid_sentences) + + @data_loader + def test_dataloader(self): + test_dataset = self.dataset_cls(prefix=hparams['test_set_name'], shuffle=False) + self.test_dl = self.build_dataloader( + test_dataset, False, self.max_valid_tokens, + self.max_valid_sentences, batch_by_size=False) + return self.test_dl + + def build_dataloader(self, dataset, shuffle, max_tokens=None, max_sentences=None, + required_batch_size_multiple=-1, endless=False, batch_by_size=True): + devices_cnt = torch.cuda.device_count() + if devices_cnt == 0: + devices_cnt = 1 + if required_batch_size_multiple == -1: + required_batch_size_multiple = devices_cnt + + def shuffle_batches(batches): + np.random.shuffle(batches) + return batches + + if max_tokens is not None: + max_tokens *= devices_cnt + if max_sentences is not None: + max_sentences *= devices_cnt + indices = dataset.ordered_indices() + if batch_by_size: + batch_sampler = utils.batch_by_size( + indices, dataset.num_tokens, max_tokens=max_tokens, max_sentences=max_sentences, + required_batch_size_multiple=required_batch_size_multiple, + ) + else: + batch_sampler = [] + for i in range(0, len(indices), max_sentences): + batch_sampler.append(indices[i:i + max_sentences]) + + if shuffle: + batches = shuffle_batches(list(batch_sampler)) + if endless: + batches = [b for _ in range(1000) for b in shuffle_batches(list(batch_sampler))] + else: + batches = batch_sampler + if endless: + batches = [b for _ in range(1000) for b in batches] + num_workers = dataset.num_workers + if self.trainer.use_ddp: + num_replicas = dist.get_world_size() + rank = dist.get_rank() + batches = [x[rank::num_replicas] for x in batches if len(x) % num_replicas == 0] + return torch.utils.data.DataLoader(dataset, + collate_fn=dataset.collater, + batch_sampler=batches, + num_workers=num_workers, + pin_memory=False) + + def build_phone_encoder(self, data_dir): + phone_list_file = os.path.join(data_dir, 'phone_set.json') + phone_list = json.load(open(phone_list_file)) + return TokenTextEncoder(None, vocab_list=phone_list, replace_oov=',') + + def build_scheduler(self, optimizer): + if hparams['scheduler'] == 'rsqrt': + return RSQRTSchedule(optimizer) + else: + return NoneSchedule(optimizer) + + def build_optimizer(self, model): + self.optimizer = optimizer = torch.optim.AdamW( + model.parameters(), + lr=hparams['lr'], + betas=(hparams['optimizer_adam_beta1'], hparams['optimizer_adam_beta2']), + weight_decay=hparams['weight_decay']) + return optimizer + + def plot_mel(self, batch_idx, spec, spec_out, name=None): + spec_cat = torch.cat([spec, spec_out], -1) + name = f'mel_{batch_idx}' if name is None else name + vmin = hparams['mel_vmin'] + vmax = hparams['mel_vmax'] + self.logger.add_figure(name, spec_to_figure(spec_cat[0], vmin, vmax), self.global_step) + + def test_start(self): + self.saving_result_pool = Pool(min(int(os.getenv('N_PROC', os.cpu_count())), 16)) + self.saving_results_futures = [] + self.results_id = 0 + self.gen_dir = os.path.join( + 
hparams['work_dir'], + f'generated_{self.trainer.global_step}_{hparams["gen_dir_name"]}') + self.vocoder: BaseVocoder = get_vocoder_cls(hparams)() + + def after_infer(self, predictions, sil_start_frame=0): + predictions = utils.unpack_dict_to_list(predictions) + assert len(predictions) == 1, 'Only support batch_size=1 in inference.' + prediction = predictions[0] + prediction = utils.tensors_to_np(prediction) + item_name = prediction.get('item_name') + text = prediction.get('text') + ph_tokens = prediction.get('txt_tokens') + mel_gt = prediction["mels"] + mel2ph_gt = prediction.get("mel2ph") + mel2ph_gt = mel2ph_gt if mel2ph_gt is not None else None + mel_pred = prediction["outputs"] + mel2ph_pred = prediction.get("mel2ph_pred") + f0_gt = prediction.get("f0") + f0_pred = prediction.get("f0_pred") + + str_phs = None + if self.phone_encoder is not None and 'txt_tokens' in prediction: + str_phs = self.phone_encoder.decode(prediction['txt_tokens'], strip_padding=True) + + if 'encdec_attn' in prediction: + encdec_attn = prediction['encdec_attn'] + encdec_attn = encdec_attn[encdec_attn.max(-1).sum(-1).argmax(-1)] + txt_lengths = prediction.get('txt_lengths') + encdec_attn = encdec_attn.T[:txt_lengths, :len(mel_gt)] + else: + encdec_attn = None + + wav_pred = self.vocoder.spec2wav(mel_pred, f0=f0_pred) + wav_pred[:sil_start_frame * hparams['hop_size']] = 0 + gen_dir = self.gen_dir + base_fn = f'[{self.results_id:06d}][{item_name}][%s]' + # if text is not None: + # base_fn += text.replace(":", "%3A")[:80] + base_fn = base_fn.replace(' ', '_') + if not hparams['profile_infer']: + os.makedirs(gen_dir, exist_ok=True) + os.makedirs(f'{gen_dir}/wavs', exist_ok=True) + os.makedirs(f'{gen_dir}/plot', exist_ok=True) + if hparams.get('save_mel_npy', False): + os.makedirs(f'{gen_dir}/npy', exist_ok=True) + if 'encdec_attn' in prediction: + os.makedirs(f'{gen_dir}/attn_plot', exist_ok=True) + self.saving_results_futures.append( + self.saving_result_pool.apply_async(self.save_result, args=[ + wav_pred, mel_pred, base_fn % 'P', gen_dir, str_phs, mel2ph_pred, encdec_attn])) + + if mel_gt is not None and hparams['save_gt']: + wav_gt = self.vocoder.spec2wav(mel_gt, f0=f0_gt) + self.saving_results_futures.append( + self.saving_result_pool.apply_async(self.save_result, args=[ + wav_gt, mel_gt, base_fn % 'G', gen_dir, str_phs, mel2ph_gt])) + if hparams['save_f0']: + import matplotlib.pyplot as plt + f0_pred_, _ = get_pitch(wav_pred, mel_pred, hparams) + f0_gt_, _ = get_pitch(wav_gt, mel_gt, hparams) + fig = plt.figure() + plt.plot(f0_pred_, label=r'$\hat{f_0}$') + plt.plot(f0_gt_, label=r'$f_0$') + plt.legend() + plt.tight_layout() + plt.savefig(f'{gen_dir}/plot/[F0][{item_name}]{text}.png', format='png') + plt.close(fig) + print(f"Pred_shape: {mel_pred.shape}, gt_shape: {mel_gt.shape}") + self.results_id += 1 + return { + 'item_name': item_name, + 'text': text, + 'ph_tokens': self.phone_encoder.decode(ph_tokens.tolist()), + 'wav_fn_pred': base_fn % 'P', + 'wav_fn_gt': base_fn % 'G', + } + + @staticmethod + def save_result(wav_out, mel, base_fn, gen_dir, str_phs=None, mel2ph=None, alignment=None): + audio.save_wav(wav_out, f'{gen_dir}/wavs/{base_fn}.wav', hparams['audio_sample_rate'], + norm=hparams['out_wav_norm']) + fig = plt.figure(figsize=(14, 10)) + spec_vmin = hparams['mel_vmin'] + spec_vmax = hparams['mel_vmax'] + heatmap = plt.pcolor(mel.T, vmin=spec_vmin, vmax=spec_vmax) + fig.colorbar(heatmap) + f0, _ = get_pitch(wav_out, mel, hparams) + f0 = f0 / 10 * (f0 > 0) + plt.plot(f0, c='white', linewidth=1, 
alpha=0.6) + if mel2ph is not None and str_phs is not None: + decoded_txt = str_phs.split(" ") + dur = mel2ph_to_dur(torch.LongTensor(mel2ph)[None, :], len(decoded_txt))[0].numpy() + dur = [0] + list(np.cumsum(dur)) + for i in range(len(dur) - 1): + shift = (i % 20) + 1 + plt.text(dur[i], shift, decoded_txt[i]) + plt.hlines(shift, dur[i], dur[i + 1], colors='b' if decoded_txt[i] != '|' else 'black') + plt.vlines(dur[i], 0, 5, colors='b' if decoded_txt[i] != '|' else 'black', + alpha=1, linewidth=1) + plt.tight_layout() + plt.savefig(f'{gen_dir}/plot/{base_fn}.png', format='png') + plt.close(fig) + if hparams.get('save_mel_npy', False): + np.save(f'{gen_dir}/npy/{base_fn}', mel) + if alignment is not None: + fig, ax = plt.subplots(figsize=(12, 16)) + im = ax.imshow(alignment, aspect='auto', origin='lower', + interpolation='none') + decoded_txt = str_phs.split(" ") + ax.set_yticks(np.arange(len(decoded_txt))) + ax.set_yticklabels(list(decoded_txt), fontsize=6) + fig.colorbar(im, ax=ax) + fig.savefig(f'{gen_dir}/attn_plot/{base_fn}_attn.png', format='png') + plt.close(fig) + + def test_end(self, outputs): + pd.DataFrame(outputs).to_csv(f'{self.gen_dir}/meta.csv') + self.saving_result_pool.close() + [f.get() for f in tqdm(self.saving_results_futures)] + self.saving_result_pool.join() + return {} + + ########## + # utils + ########## + def weights_nonzero_speech(self, target): + # target : B x T x mel + # Assign weight 1.0 to all labels except for padding (id=0). + dim = target.size(-1) + return target.abs().sum(-1, keepdim=True).ne(0).float().repeat(1, 1, dim) + + def make_stop_target(self, target): + # target : B x T x mel + seq_mask = target.abs().sum(-1).ne(0).float() + seq_length = seq_mask.sum(1) + mask_r = 1 - sequence_mask(seq_length - 1, target.size(1)).float() + return seq_mask, mask_r diff --git a/NeuralSeq/tasks/tts/tts_utils.py b/NeuralSeq/tasks/tts/tts_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..e13439ee72e4fda220605c5868b3159110d9129b --- /dev/null +++ b/NeuralSeq/tasks/tts/tts_utils.py @@ -0,0 +1,54 @@ +import importlib + +from data_gen.tts.base_binarizer import BaseBinarizer +from data_gen.tts.base_preprocess import BasePreprocessor +from data_gen.tts.txt_processors.base_text_processor import get_txt_processor_cls +from utils.hparams import hparams + + +def parse_dataset_configs(): + max_tokens = hparams['max_tokens'] + max_sentences = hparams['max_sentences'] + max_valid_tokens = hparams['max_valid_tokens'] + if max_valid_tokens == -1: + hparams['max_valid_tokens'] = max_valid_tokens = max_tokens + max_valid_sentences = hparams['max_valid_sentences'] + if max_valid_sentences == -1: + hparams['max_valid_sentences'] = max_valid_sentences = max_sentences + return max_tokens, max_sentences, max_valid_tokens, max_valid_sentences + + +def parse_mel_losses(): + mel_losses = hparams['mel_losses'].split("|") + loss_and_lambda = {} + for i, l in enumerate(mel_losses): + if l == '': + continue + if ':' in l: + l, lbd = l.split(":") + lbd = float(lbd) + else: + lbd = 1.0 + loss_and_lambda[l] = lbd + print("| Mel losses:", loss_and_lambda) + return loss_and_lambda + + +def load_data_preprocessor(): + preprocess_cls = hparams["preprocess_cls"] + pkg = ".".join(preprocess_cls.split(".")[:-1]) + cls_name = preprocess_cls.split(".")[-1] + preprocessor: BasePreprocessor = getattr(importlib.import_module(pkg), cls_name)() + preprocess_args = {} + preprocess_args.update(hparams['preprocess_args']) + return preprocessor, preprocess_args + + +def 
load_data_binarizer(): + binarizer_cls = hparams['binarizer_cls'] + pkg = ".".join(binarizer_cls.split(".")[:-1]) + cls_name = binarizer_cls.split(".")[-1] + binarizer: BaseBinarizer = getattr(importlib.import_module(pkg), cls_name)() + binarization_args = {} + binarization_args.update(hparams['binarization_args']) + return binarizer, binarization_args \ No newline at end of file diff --git a/NeuralSeq/tasks/vocoder/dataset_utils.py b/NeuralSeq/tasks/vocoder/dataset_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..05dcdaa524efde31575dd30b57b627d22744b53c --- /dev/null +++ b/NeuralSeq/tasks/vocoder/dataset_utils.py @@ -0,0 +1,204 @@ +import glob +import importlib +import os +from resemblyzer import VoiceEncoder +import numpy as np +import torch +import torch.distributed as dist +from torch.utils.data import DistributedSampler +import utils +from tasks.base_task import BaseDataset +from utils.hparams import hparams +from utils.indexed_datasets import IndexedDataset +from tqdm import tqdm + +class EndlessDistributedSampler(DistributedSampler): + def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True): + if num_replicas is None: + if not dist.is_available(): + raise RuntimeError("Requires distributed package to be available") + num_replicas = dist.get_world_size() + if rank is None: + if not dist.is_available(): + raise RuntimeError("Requires distributed package to be available") + rank = dist.get_rank() + self.dataset = dataset + self.num_replicas = num_replicas + self.rank = rank + self.epoch = 0 + self.shuffle = shuffle + + g = torch.Generator() + g.manual_seed(self.epoch) + if self.shuffle: + indices = [i for _ in range(1000) for i in torch.randperm( + len(self.dataset), generator=g).tolist()] + else: + indices = [i for _ in range(1000) for i in list(range(len(self.dataset)))] + indices = indices[:len(indices) // self.num_replicas * self.num_replicas] + indices = indices[self.rank::self.num_replicas] + self.indices = indices + + def __iter__(self): + return iter(self.indices) + + def __len__(self): + return len(self.indices) + + +class VocoderDataset(BaseDataset): + def __init__(self, prefix, shuffle=False): + super().__init__(shuffle) + self.hparams = hparams + self.prefix = prefix + self.data_dir = hparams['binary_data_dir'] + self.is_infer = prefix == 'test' + self.batch_max_frames = 0 if self.is_infer else hparams['max_samples'] // hparams['hop_size'] + self.aux_context_window = hparams['aux_context_window'] + self.hop_size = hparams['hop_size'] + if self.is_infer and hparams['test_input_dir'] != '': + self.indexed_ds, self.sizes = self.load_test_inputs(hparams['test_input_dir']) + self.avail_idxs = [i for i, _ in enumerate(self.sizes)] + elif self.is_infer and hparams['test_mel_dir'] != '': + self.indexed_ds, self.sizes = self.load_mel_inputs(hparams['test_mel_dir']) + self.avail_idxs = [i for i, _ in enumerate(self.sizes)] + else: + self.indexed_ds = None + self.sizes = np.load(f'{self.data_dir}/{self.prefix}_lengths.npy') + self.avail_idxs = [idx for idx, s in enumerate(self.sizes) if + s - 2 * self.aux_context_window > self.batch_max_frames] + print(f"| {len(self.sizes) - len(self.avail_idxs)} short items are skipped in {prefix} set.") + self.sizes = [s for idx, s in enumerate(self.sizes) if + s - 2 * self.aux_context_window > self.batch_max_frames] + + def _get_item(self, index): + if self.indexed_ds is None: + self.indexed_ds = IndexedDataset(f'{self.data_dir}/{self.prefix}') + item = self.indexed_ds[index] + return item + + def 
__getitem__(self, index): + index = self.avail_idxs[index] + item = self._get_item(index) + sample = { + "id": index, + "item_name": item['item_name'], + "mel": torch.FloatTensor(item['mel']), + "wav": torch.FloatTensor(item['wav'].astype(np.float32)), + } + if 'pitch' in item: + sample['pitch'] = torch.LongTensor(item['pitch']) + sample['f0'] = torch.FloatTensor(item['f0']) + + if hparams.get('use_spk_embed', False): + sample["spk_embed"] = torch.Tensor(item['spk_embed']) + if hparams.get('use_emo_embed', False): + sample["emo_embed"] = torch.Tensor(item['emo_embed']) + + return sample + + def collater(self, batch): + if len(batch) == 0: + return {} + + y_batch, c_batch, p_batch, f0_batch = [], [], [], [] + item_name = [] + have_pitch = 'pitch' in batch[0] + for idx in range(len(batch)): + item_name.append(batch[idx]['item_name']) + x, c = batch[idx]['wav'] if self.hparams['use_wav'] else None, batch[idx]['mel'].squeeze(0) + if have_pitch: + p = batch[idx]['pitch'] + f0 = batch[idx]['f0'] + if self.hparams['use_wav']:self._assert_ready_for_upsampling(x, c, self.hop_size, 0) + if len(c) - 2 * self.aux_context_window > self.batch_max_frames: + # randomly pickup with the batch_max_steps length of the part + batch_max_frames = self.batch_max_frames if self.batch_max_frames != 0 else len( + c) - 2 * self.aux_context_window - 1 + batch_max_steps = batch_max_frames * self.hop_size + interval_start = self.aux_context_window + interval_end = len(c) - batch_max_frames - self.aux_context_window + start_frame = np.random.randint(interval_start, interval_end) + start_step = start_frame * self.hop_size + if self.hparams['use_wav']:y = x[start_step: start_step + batch_max_steps] + c = c[start_frame - self.aux_context_window: + start_frame + self.aux_context_window + batch_max_frames] + if have_pitch: + p = p[start_frame - self.aux_context_window: + start_frame + self.aux_context_window + batch_max_frames] + f0 = f0[start_frame - self.aux_context_window: + start_frame + self.aux_context_window + batch_max_frames] + if self.hparams['use_wav']:self._assert_ready_for_upsampling(y, c, self.hop_size, self.aux_context_window) + else: + print(f"Removed short sample from batch (length={len(x)}).") + continue + if self.hparams['use_wav']:y_batch += [y.reshape(-1, 1)] # [(T, 1), (T, 1), ...] + c_batch += [c] # [(T' C), (T' C), ...] + if have_pitch: + p_batch += [p] # [(T' C), (T' C), ...] + f0_batch += [f0] # [(T' C), (T' C), ...] 
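+            # Note on the crop above: with aux_context_window w and a crop of
+            # batch_max_frames frames, the mel slice keeps w extra frames on each side
+            # (batch_max_frames + 2 * w frames in total), while the waveform slice keeps
+            # exactly batch_max_frames * hop_size samples; this is the relation that
+            # _assert_ready_for_upsampling checks. Illustrative numbers only: w=2,
+            # 64 frames, hop_size=256 -> 68 mel frames and 16384 wav samples.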
+
+        # convert each batch to tensor, assume that each item in batch has the same length
+        if self.hparams['use_wav']: y_batch = utils.collate_2d(y_batch, 0).transpose(2, 1)  # (B, 1, T)
+        c_batch = utils.collate_2d(c_batch, 0).transpose(2, 1)  # (B, C, T')
+        if have_pitch:
+            p_batch = utils.collate_1d(p_batch, 0)  # (B, T')
+            f0_batch = utils.collate_1d(f0_batch, 0)  # (B, T')
+        else:
+            p_batch, f0_batch = None, None
+
+        # make input noise signal batch tensor
+        if self.hparams['use_wav']: z_batch = torch.randn(y_batch.size())  # (B, 1, T)
+        else: z_batch = []
+        return {
+            'z': z_batch,
+            'mels': c_batch,
+            'wavs': y_batch,
+            'pitches': p_batch,
+            'f0': f0_batch,
+            'item_name': item_name
+        }
+
+    @staticmethod
+    def _assert_ready_for_upsampling(x, c, hop_size, context_window):
+        """Assert the audio and feature lengths are correctly adjusted for upsampling."""
+        assert len(x) == (len(c) - 2 * context_window) * hop_size
+
+    def load_test_inputs(self, test_input_dir, spk_id=0):
+        inp_wav_paths = sorted(glob.glob(f'{test_input_dir}/*.wav') + glob.glob(f'{test_input_dir}/**/*.mp3'))
+        sizes = []
+        items = []
+
+        binarizer_cls = hparams.get("binarizer_cls", 'data_gen.tts.base_binarizer.BaseBinarizer')
+        pkg = ".".join(binarizer_cls.split(".")[:-1])
+        cls_name = binarizer_cls.split(".")[-1]
+        binarizer_cls = getattr(importlib.import_module(pkg), cls_name)
+        binarization_args = hparams['binarization_args']
+
+        for wav_fn in inp_wav_paths:
+            item_name = wav_fn[len(test_input_dir) + 1:].replace("/", "_")
+            item = binarizer_cls.process_item(
+                item_name, wav_fn, binarization_args)
+            items.append(item)
+            sizes.append(item['len'])
+        return items, sizes
+
+    def load_mel_inputs(self, test_input_dir, spk_id=0):
+        inp_mel_paths = sorted(glob.glob(f'{test_input_dir}/*.npy'))
+        sizes = []
+        items = []
+
+        binarizer_cls = hparams.get("binarizer_cls", 'data_gen.tts.base_binarizer.BaseBinarizer')
+        pkg = ".".join(binarizer_cls.split(".")[:-1])
+        cls_name = binarizer_cls.split(".")[-1]
+        binarizer_cls = getattr(importlib.import_module(pkg), cls_name)
+        binarization_args = hparams['binarization_args']
+
+        for mel in inp_mel_paths:
+            mel_input = np.load(mel)
+            mel_input = torch.FloatTensor(mel_input)
+            item_name = mel[len(test_input_dir) + 1:].replace("/", "_")
+            item = binarizer_cls.process_mel_item(item_name, mel_input, None, binarization_args)
+            items.append(item)
+            sizes.append(item['len'])
+        return items, sizes
diff --git a/NeuralSeq/tasks/vocoder/vocoder_base.py b/NeuralSeq/tasks/vocoder/vocoder_base.py
new file mode 100644
index 0000000000000000000000000000000000000000..04f45af60c8ac1c1f8303d091f8c6031ec8451bf
--- /dev/null
+++ b/NeuralSeq/tasks/vocoder/vocoder_base.py
@@ -0,0 +1,66 @@
+import os
+
+import torch
+import torch.distributed as dist
+from torch.utils.data import DistributedSampler
+
+from tasks.base_task import BaseTask
+from tasks.base_task import data_loader
+from tasks.vocoder.dataset_utils import VocoderDataset, EndlessDistributedSampler
+from utils.hparams import hparams
+
+
+class VocoderBaseTask(BaseTask):
+    def __init__(self):
+        super(VocoderBaseTask, self).__init__()
+        self.max_sentences = hparams['max_sentences']
+        self.max_valid_sentences = hparams['max_valid_sentences']
+        if self.max_valid_sentences == -1:
+            hparams['max_valid_sentences'] = self.max_valid_sentences = self.max_sentences
+        self.dataset_cls = VocoderDataset
+
+    @data_loader
+    def train_dataloader(self):
+        train_dataset = self.dataset_cls('train', shuffle=True)
+        return self.build_dataloader(train_dataset, True, self.max_sentences,
hparams['endless_ds']) + + @data_loader + def val_dataloader(self): + valid_dataset = self.dataset_cls('valid', shuffle=False) + return self.build_dataloader(valid_dataset, False, self.max_valid_sentences) + + @data_loader + def test_dataloader(self): + test_dataset = self.dataset_cls('test', shuffle=False) + return self.build_dataloader(test_dataset, False, self.max_valid_sentences) + + def build_dataloader(self, dataset, shuffle, max_sentences, endless=False): + world_size = 1 + rank = 0 + if dist.is_initialized(): + world_size = dist.get_world_size() + rank = dist.get_rank() + sampler_cls = DistributedSampler if not endless else EndlessDistributedSampler + train_sampler = sampler_cls( + dataset=dataset, + num_replicas=world_size, + rank=rank, + shuffle=shuffle, + ) + return torch.utils.data.DataLoader( + dataset=dataset, + shuffle=False, + collate_fn=dataset.collater, + batch_size=max_sentences, + num_workers=dataset.num_workers, + sampler=train_sampler, + pin_memory=True, + ) + + def test_start(self): + self.gen_dir = os.path.join(hparams['work_dir'], + f'generated_{self.trainer.global_step}_{hparams["gen_dir_name"]}') + os.makedirs(self.gen_dir, exist_ok=True) + + def test_end(self, outputs): + return {} diff --git a/NeuralSeq/usr/.gitkeep b/NeuralSeq/usr/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/NeuralSeq/usr/__pycache__/__init__.cpython-37.pyc b/NeuralSeq/usr/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..113de6309e3e63413ebcfe597cc955dbc5df7a0a Binary files /dev/null and b/NeuralSeq/usr/__pycache__/__init__.cpython-37.pyc differ diff --git a/NeuralSeq/usr/__pycache__/__init__.cpython-38.pyc b/NeuralSeq/usr/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a1ab6e3f8f7f54c2091146a5567ca8bbcc75b6cc Binary files /dev/null and b/NeuralSeq/usr/__pycache__/__init__.cpython-38.pyc differ diff --git a/NeuralSeq/usr/__pycache__/diffsinger_task.cpython-37.pyc b/NeuralSeq/usr/__pycache__/diffsinger_task.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e848eee13d2ee7f81de28d25eb55db077a7ea6c0 Binary files /dev/null and b/NeuralSeq/usr/__pycache__/diffsinger_task.cpython-37.pyc differ diff --git a/NeuralSeq/usr/__pycache__/diffsinger_task.cpython-38.pyc b/NeuralSeq/usr/__pycache__/diffsinger_task.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..895a14454641559e0f589b3186cc6bc648801877 Binary files /dev/null and b/NeuralSeq/usr/__pycache__/diffsinger_task.cpython-38.pyc differ diff --git a/NeuralSeq/usr/__pycache__/diffspeech_task.cpython-37.pyc b/NeuralSeq/usr/__pycache__/diffspeech_task.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f2e88b4eef7861308c2c632c9fc8f84aceafb79f Binary files /dev/null and b/NeuralSeq/usr/__pycache__/diffspeech_task.cpython-37.pyc differ diff --git a/NeuralSeq/usr/__pycache__/diffspeech_task.cpython-38.pyc b/NeuralSeq/usr/__pycache__/diffspeech_task.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..697fb81dac438d52798135f3d31983dec1cdb56b Binary files /dev/null and b/NeuralSeq/usr/__pycache__/diffspeech_task.cpython-38.pyc differ diff --git a/NeuralSeq/usr/__pycache__/task.cpython-37.pyc b/NeuralSeq/usr/__pycache__/task.cpython-37.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..dd0ae5a87b2aaba005b71c5f85caf0dddb555264 Binary files /dev/null and b/NeuralSeq/usr/__pycache__/task.cpython-37.pyc differ diff --git a/NeuralSeq/usr/__pycache__/task.cpython-38.pyc b/NeuralSeq/usr/__pycache__/task.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1dfa1f45ab43189c9963496fee2a87cd94a36433 Binary files /dev/null and b/NeuralSeq/usr/__pycache__/task.cpython-38.pyc differ diff --git a/NeuralSeq/utils/__init__.py b/NeuralSeq/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4ea5c5a67e038c2213247dfb905942882c090a77 --- /dev/null +++ b/NeuralSeq/utils/__init__.py @@ -0,0 +1,250 @@ +import glob +import logging +import re +import time +from collections import defaultdict +import os +import sys +import shutil +import types +import numpy as np +import torch +import torch.nn.functional as F +import torch.distributed as dist +from torch import nn + + +def tensors_to_scalars(metrics): + new_metrics = {} + for k, v in metrics.items(): + if isinstance(v, torch.Tensor): + v = v.item() + if type(v) is dict: + v = tensors_to_scalars(v) + new_metrics[k] = v + return new_metrics + + +class AvgrageMeter(object): + + def __init__(self): + self.reset() + + def reset(self): + self.avg = 0 + self.sum = 0 + self.cnt = 0 + + def update(self, val, n=1): + self.sum += val * n + self.cnt += n + self.avg = self.sum / self.cnt + + +def collate_1d(values, pad_idx=0, left_pad=False, shift_right=False, max_len=None, shift_id=1): + """Convert a list of 1d tensors into a padded 2d tensor.""" + size = max(v.size(0) for v in values) if max_len is None else max_len + res = values[0].new(len(values), size).fill_(pad_idx) + + def copy_tensor(src, dst): + assert dst.numel() == src.numel() + if shift_right: + dst[1:] = src[:-1] + dst[0] = shift_id + else: + dst.copy_(src) + + for i, v in enumerate(values): + copy_tensor(v, res[i][size - len(v):] if left_pad else res[i][:len(v)]) + return res + + +def collate_2d(values, pad_idx=0, left_pad=False, shift_right=False, max_len=None): + """Convert a list of 2d tensors into a padded 3d tensor.""" + size = max(v.size(0) for v in values) if max_len is None else max_len + res = values[0].new(len(values), size, values[0].shape[1]).fill_(pad_idx) + + def copy_tensor(src, dst): + assert dst.numel() == src.numel() + if shift_right: + dst[1:] = src[:-1] + else: + dst.copy_(src) + + for i, v in enumerate(values): + copy_tensor(v, res[i][size - len(v):] if left_pad else res[i][:len(v)]) + return res + + +def _is_batch_full(batch, num_tokens, max_tokens, max_sentences): + if len(batch) == 0: + return 0 + if len(batch) == max_sentences: + return 1 + if num_tokens > max_tokens: + return 1 + return 0 + + +def batch_by_size( + indices, num_tokens_fn, max_tokens=None, max_sentences=None, + required_batch_size_multiple=1, distributed=False +): + """ + Yield mini-batches of indices bucketed by size. Batches may contain + sequences of different lengths. + + Args: + indices (List[int]): ordered list of dataset indices + num_tokens_fn (callable): function that returns the number of tokens at + a given index + max_tokens (int, optional): max number of tokens in each batch + (default: None). + max_sentences (int, optional): max number of sentences in each + batch (default: None). + required_batch_size_multiple (int, optional): require batch size to + be a multiple of N (default: 1). 
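+
+    Example:
+        with max_tokens=1000 and max_sentences=8, a batch is closed once it already
+        holds 8 indices, or once (current_batch_size + 1) * longest_sample_so_far
+        would exceed 1000 tokens (illustrative limits; the real ones come from the
+        max_tokens / max_sentences hparams).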
+ """ + max_tokens = max_tokens if max_tokens is not None else sys.maxsize + max_sentences = max_sentences if max_sentences is not None else sys.maxsize + bsz_mult = required_batch_size_multiple + + if isinstance(indices, types.GeneratorType): + indices = np.fromiter(indices, dtype=np.int64, count=-1) + + sample_len = 0 + sample_lens = [] + batch = [] + batches = [] + for i in range(len(indices)): + idx = indices[i] + num_tokens = num_tokens_fn(idx) + sample_lens.append(num_tokens) + sample_len = max(sample_len, num_tokens) + assert sample_len <= max_tokens, ( + "sentence at index {} of size {} exceeds max_tokens " + "limit of {}!".format(idx, sample_len, max_tokens) + ) + num_tokens = (len(batch) + 1) * sample_len + + if _is_batch_full(batch, num_tokens, max_tokens, max_sentences): + mod_len = max( + bsz_mult * (len(batch) // bsz_mult), + len(batch) % bsz_mult, + ) + batches.append(batch[:mod_len]) + batch = batch[mod_len:] + sample_lens = sample_lens[mod_len:] + sample_len = max(sample_lens) if len(sample_lens) > 0 else 0 + batch.append(idx) + if len(batch) > 0: + batches.append(batch) + return batches + + +def make_positions(tensor, padding_idx): + """Replace non-padding symbols with their position numbers. + + Position numbers begin at padding_idx+1. Padding symbols are ignored. + """ + # The series of casts and type-conversions here are carefully + # balanced to both work with ONNX export and XLA. In particular XLA + # prefers ints, cumsum defaults to output longs, and ONNX doesn't know + # how to handle the dtype kwarg in cumsum. + mask = tensor.ne(padding_idx).int() + return ( + torch.cumsum(mask, dim=1).type_as(mask) * mask + ).long() + padding_idx + + +def softmax(x, dim): + return F.softmax(x, dim=dim, dtype=torch.float32) + + +def unpack_dict_to_list(samples): + samples_ = [] + bsz = samples.get('outputs').size(0) + for i in range(bsz): + res = {} + for k, v in samples.items(): + try: + res[k] = v[i] + except: + pass + samples_.append(res) + return samples_ + + +def load_ckpt(cur_model, ckpt_base_dir, prefix_in_ckpt='model', force=True, strict=True): + if os.path.isfile(ckpt_base_dir): + base_dir = os.path.dirname(ckpt_base_dir) + checkpoint_path = [ckpt_base_dir] + else: + base_dir = ckpt_base_dir + checkpoint_path = sorted(glob.glob(f'{base_dir}/model_ckpt_steps_*.ckpt'), key= + lambda x: int(re.findall(f'{base_dir}/model_ckpt_steps_(\d+).ckpt', x)[0])) + if len(checkpoint_path) > 0: + checkpoint_path = checkpoint_path[-1] + state_dict = torch.load(checkpoint_path, map_location="cpu")["state_dict"] + state_dict = {k[len(prefix_in_ckpt) + 1:]: v for k, v in state_dict.items() + if k.startswith(f'{prefix_in_ckpt}.')} + if not strict: + cur_model_state_dict = cur_model.state_dict() + unmatched_keys = [] + for key, param in state_dict.items(): + if key in cur_model_state_dict: + new_param = cur_model_state_dict[key] + if new_param.shape != param.shape: + unmatched_keys.append(key) + print("| Unmatched keys: ", key, new_param.shape, param.shape) + for key in unmatched_keys: + del state_dict[key] + cur_model.load_state_dict(state_dict, strict=strict) + print(f"| load '{prefix_in_ckpt}' from '{checkpoint_path}'.") + else: + e_msg = f"| ckpt not found in {base_dir}." 
+ if force: + assert False, e_msg + else: + print(e_msg) + + +def remove_padding(x, padding_idx=0): + if x is None: + return None + assert len(x.shape) in [1, 2] + if len(x.shape) == 2: # [T, H] + return x[np.abs(x).sum(-1) != padding_idx] + elif len(x.shape) == 1: # [T] + return x[x != padding_idx] + + +class Timer: + timer_map = {} + + def __init__(self, name, print_time=False): + if name not in Timer.timer_map: + Timer.timer_map[name] = 0 + self.name = name + self.print_time = print_time + + def __enter__(self): + self.t = time.time() + + def __exit__(self, exc_type, exc_val, exc_tb): + Timer.timer_map[self.name] += time.time() - self.t + if self.print_time: + print(self.name, Timer.timer_map[self.name]) + + +def print_arch(model, model_name='model'): + print(f"| {model_name} Arch: ", model) + num_params(model, model_name=model_name) + + +def num_params(model, print_out=True, model_name="model"): + parameters = filter(lambda p: p.requires_grad, model.parameters()) + parameters = sum([np.prod(p.size()) for p in parameters]) / 1_000_000 + if print_out: + print(f'| {model_name} Trainable Parameters: %.3fM' % parameters) + return parameters diff --git a/NeuralSeq/utils/__pycache__/__init__.cpython-37.pyc b/NeuralSeq/utils/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9ac266cc0d080ede678c7d97b7074eb1d7b3322b Binary files /dev/null and b/NeuralSeq/utils/__pycache__/__init__.cpython-37.pyc differ diff --git a/NeuralSeq/utils/__pycache__/__init__.cpython-38.pyc b/NeuralSeq/utils/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..783be91ecaa477d9b7aaedd30e63731ae7216235 Binary files /dev/null and b/NeuralSeq/utils/__pycache__/__init__.cpython-38.pyc differ diff --git a/NeuralSeq/utils/__pycache__/audio.cpython-37.pyc b/NeuralSeq/utils/__pycache__/audio.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8b0a10b51558f76e29195e575105ba262db8eb7d Binary files /dev/null and b/NeuralSeq/utils/__pycache__/audio.cpython-37.pyc differ diff --git a/NeuralSeq/utils/__pycache__/audio.cpython-38.pyc b/NeuralSeq/utils/__pycache__/audio.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0662b9272e7d636b37d37337b52779516c039f3a Binary files /dev/null and b/NeuralSeq/utils/__pycache__/audio.cpython-38.pyc differ diff --git a/NeuralSeq/utils/__pycache__/ckpt_utils.cpython-38.pyc b/NeuralSeq/utils/__pycache__/ckpt_utils.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ec8864bb3ce30f7692474c1fdbdc52ff4b5c81db Binary files /dev/null and b/NeuralSeq/utils/__pycache__/ckpt_utils.cpython-38.pyc differ diff --git a/NeuralSeq/utils/__pycache__/cwt.cpython-37.pyc b/NeuralSeq/utils/__pycache__/cwt.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e8d0a78cd107c906d77a5e3db8798babebfbe6f7 Binary files /dev/null and b/NeuralSeq/utils/__pycache__/cwt.cpython-37.pyc differ diff --git a/NeuralSeq/utils/__pycache__/cwt.cpython-38.pyc b/NeuralSeq/utils/__pycache__/cwt.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..41f98230bb6f9cbe8331f7e660d0803c7ce8c26e Binary files /dev/null and b/NeuralSeq/utils/__pycache__/cwt.cpython-38.pyc differ diff --git a/NeuralSeq/utils/__pycache__/hparams.cpython-37.pyc b/NeuralSeq/utils/__pycache__/hparams.cpython-37.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..ad945de3179b98fefba1aafa6cbc9961dede92c5 Binary files /dev/null and b/NeuralSeq/utils/__pycache__/hparams.cpython-37.pyc differ diff --git a/NeuralSeq/utils/__pycache__/hparams.cpython-38.pyc b/NeuralSeq/utils/__pycache__/hparams.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d60f94dd25fadcaf1c99786c6125a2f214c87fba Binary files /dev/null and b/NeuralSeq/utils/__pycache__/hparams.cpython-38.pyc differ diff --git a/NeuralSeq/utils/__pycache__/indexed_datasets.cpython-37.pyc b/NeuralSeq/utils/__pycache__/indexed_datasets.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fdd36ababd525554811cdceff6887a5aaab96230 Binary files /dev/null and b/NeuralSeq/utils/__pycache__/indexed_datasets.cpython-37.pyc differ diff --git a/NeuralSeq/utils/__pycache__/indexed_datasets.cpython-38.pyc b/NeuralSeq/utils/__pycache__/indexed_datasets.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a7edb99ffab83210de71f6f449c9a5f0ca676193 Binary files /dev/null and b/NeuralSeq/utils/__pycache__/indexed_datasets.cpython-38.pyc differ diff --git a/NeuralSeq/utils/__pycache__/multiprocess_utils.cpython-38.pyc b/NeuralSeq/utils/__pycache__/multiprocess_utils.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..128ce769fd72bc26415b86543bd90e5d2dafc3fb Binary files /dev/null and b/NeuralSeq/utils/__pycache__/multiprocess_utils.cpython-38.pyc differ diff --git a/NeuralSeq/utils/__pycache__/os_utils.cpython-38.pyc b/NeuralSeq/utils/__pycache__/os_utils.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4a5ce64215f69bea2822f160b6a3ff3125018186 Binary files /dev/null and b/NeuralSeq/utils/__pycache__/os_utils.cpython-38.pyc differ diff --git a/NeuralSeq/utils/__pycache__/pitch_utils.cpython-37.pyc b/NeuralSeq/utils/__pycache__/pitch_utils.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..614810c455cc02381fcff139b7ca3cd031d52bb4 Binary files /dev/null and b/NeuralSeq/utils/__pycache__/pitch_utils.cpython-37.pyc differ diff --git a/NeuralSeq/utils/__pycache__/pitch_utils.cpython-38.pyc b/NeuralSeq/utils/__pycache__/pitch_utils.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7e37b0649a76f087eeb026228ef2cc27a40fbe44 Binary files /dev/null and b/NeuralSeq/utils/__pycache__/pitch_utils.cpython-38.pyc differ diff --git a/NeuralSeq/utils/__pycache__/pl_utils.cpython-37.pyc b/NeuralSeq/utils/__pycache__/pl_utils.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..40ea98dcc40e207f1860fec22d19374b4dc71666 Binary files /dev/null and b/NeuralSeq/utils/__pycache__/pl_utils.cpython-37.pyc differ diff --git a/NeuralSeq/utils/__pycache__/pl_utils.cpython-38.pyc b/NeuralSeq/utils/__pycache__/pl_utils.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..11f0a27a7ed0fd06fb7937bb733d3ec0349f2375 Binary files /dev/null and b/NeuralSeq/utils/__pycache__/pl_utils.cpython-38.pyc differ diff --git a/NeuralSeq/utils/__pycache__/plot.cpython-37.pyc b/NeuralSeq/utils/__pycache__/plot.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0050a6462192c1585e444f08ef16e56a5a04a981 Binary files /dev/null and b/NeuralSeq/utils/__pycache__/plot.cpython-37.pyc differ diff --git a/NeuralSeq/utils/__pycache__/plot.cpython-38.pyc b/NeuralSeq/utils/__pycache__/plot.cpython-38.pyc new file mode 100644 
index 0000000000000000000000000000000000000000..0acdca3d51c9c0f6cd35f173f2cdef0810655b7c Binary files /dev/null and b/NeuralSeq/utils/__pycache__/plot.cpython-38.pyc differ diff --git a/NeuralSeq/utils/__pycache__/text_encoder.cpython-37.pyc b/NeuralSeq/utils/__pycache__/text_encoder.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..83578bffa96c73ffb5c6ffc8ad9c036612701d27 Binary files /dev/null and b/NeuralSeq/utils/__pycache__/text_encoder.cpython-37.pyc differ diff --git a/NeuralSeq/utils/__pycache__/text_encoder.cpython-38.pyc b/NeuralSeq/utils/__pycache__/text_encoder.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2bd1f7e2ba260f2549de6bab5877dcbce3390f2c Binary files /dev/null and b/NeuralSeq/utils/__pycache__/text_encoder.cpython-38.pyc differ diff --git a/NeuralSeq/utils/__pycache__/text_norm.cpython-38.pyc b/NeuralSeq/utils/__pycache__/text_norm.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dd749117a8701f3c06db86abbe92bdf81d933870 Binary files /dev/null and b/NeuralSeq/utils/__pycache__/text_norm.cpython-38.pyc differ diff --git a/NeuralSeq/utils/__pycache__/training_utils.cpython-37.pyc b/NeuralSeq/utils/__pycache__/training_utils.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f06952edfaae558da40ff606df759fba87e1329d Binary files /dev/null and b/NeuralSeq/utils/__pycache__/training_utils.cpython-37.pyc differ diff --git a/NeuralSeq/utils/__pycache__/training_utils.cpython-38.pyc b/NeuralSeq/utils/__pycache__/training_utils.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..14f38a9ef32a4bc3640ccbeddc38294a343cbef0 Binary files /dev/null and b/NeuralSeq/utils/__pycache__/training_utils.cpython-38.pyc differ diff --git a/NeuralSeq/utils/__pycache__/tts_utils.cpython-38.pyc b/NeuralSeq/utils/__pycache__/tts_utils.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..abdec3af193523852bdd2ce4359233d989a2bf91 Binary files /dev/null and b/NeuralSeq/utils/__pycache__/tts_utils.cpython-38.pyc differ diff --git a/NeuralSeq/utils/audio.py b/NeuralSeq/utils/audio.py new file mode 100644 index 0000000000000000000000000000000000000000..30be102e9add71068ebab579927d4166f6f06a54 --- /dev/null +++ b/NeuralSeq/utils/audio.py @@ -0,0 +1,92 @@ +import subprocess +import matplotlib +import os +matplotlib.use('Agg') +import librosa +import librosa.filters +import numpy as np +from scipy import signal +from scipy.io import wavfile + + +def save_wav(wav, path, sr, norm=False): + if norm: + wav = wav / np.abs(wav).max() + wav *= 32767 + # proposed by @dsmiller + wavfile.write(path, sr, wav.astype(np.int16)) + + +def get_hop_size(hparams): + hop_size = hparams['hop_size'] + if hop_size is None: + assert hparams['frame_shift_ms'] is not None + hop_size = int(hparams['frame_shift_ms'] / 1000 * hparams['audio_sample_rate']) + return hop_size + + +########################################################################################### +def _stft(y, hparams): + return librosa.stft(y=y, n_fft=hparams['fft_size'], hop_length=get_hop_size(hparams), + win_length=hparams['win_size'], pad_mode='constant') + + +def _istft(y, hparams): + return librosa.istft(y, hop_length=get_hop_size(hparams), win_length=hparams['win_size']) + + +def librosa_pad_lr(x, fsize, fshift, pad_sides=1): + '''compute right padding (final frame) or both sides padding (first and final frames) + ''' + assert pad_sides in (1, 2) + # return 
int(fsize // 2) + pad = (x.shape[0] // fshift + 1) * fshift - x.shape[0] + if pad_sides == 1: + return 0, pad + else: + return pad // 2, pad // 2 + pad % 2 + + +# Conversions +def amp_to_db(x): + return 20 * np.log10(np.maximum(1e-5, x)) + + +def normalize(S, hparams): + return (S - hparams['min_level_db']) / -hparams['min_level_db'] + +def denormalize(D, hparams): + return (D * -hparams['min_level_db']) + hparams['min_level_db'] +def rnnoise(filename, out_fn=None, verbose=False, out_sample_rate=22050): + assert os.path.exists('./rnnoise/examples/rnnoise_demo'), INSTALL_STR + if out_fn is None: + out_fn = f"{filename[:-4]}.denoised.wav" + out_48k_fn = f"{out_fn}.48000.wav" + tmp0_fn = f"{out_fn}.0.wav" + tmp1_fn = f"{out_fn}.1.wav" + tmp2_fn = f"{out_fn}.2.raw" + tmp3_fn = f"{out_fn}.3.raw" + if verbose: + print("Pre-processing audio...") # wav to pcm raw + subprocess.check_call( + f'sox "{filename}" -G -r48000 "{tmp0_fn}"', shell=True, stdin=subprocess.PIPE) # convert to raw + subprocess.check_call( + f'sox -v 0.95 "{tmp0_fn}" "{tmp1_fn}"', shell=True, stdin=subprocess.PIPE) # convert to raw + subprocess.check_call( + f'ffmpeg -y -i "{tmp1_fn}" -loglevel quiet -f s16le -ac 1 -ar 48000 "{tmp2_fn}"', + shell=True, stdin=subprocess.PIPE) # convert to raw + if verbose: + print("Applying rnnoise algorithm to audio...") # rnnoise + subprocess.check_call( + f'./rnnoise/examples/rnnoise_demo "{tmp2_fn}" "{tmp3_fn}"', shell=True) + + if verbose: + print("Post-processing audio...") # pcm raw to wav + if filename == out_fn: + subprocess.check_call(f'rm -f "{out_fn}"', shell=True) + subprocess.check_call( + f'sox -t raw -r 48000 -b 16 -e signed-integer -c 1 "{tmp3_fn}" "{out_48k_fn}"', shell=True) + subprocess.check_call(f'sox "{out_48k_fn}" -G -r{out_sample_rate} "{out_fn}"', shell=True) + subprocess.check_call(f'rm -f "{tmp0_fn}" "{tmp1_fn}" "{tmp2_fn}" "{tmp3_fn}" "{out_48k_fn}"', shell=True) + if verbose: + print("Audio-filtering completed!") \ No newline at end of file diff --git a/NeuralSeq/utils/ckpt_utils.py b/NeuralSeq/utils/ckpt_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..fc321f9ba891ffffc374df65871c3085bf898afb --- /dev/null +++ b/NeuralSeq/utils/ckpt_utils.py @@ -0,0 +1,68 @@ +import glob +import logging +import os +import re +import torch + + +def get_last_checkpoint(work_dir, steps=None): + checkpoint = None + last_ckpt_path = None + ckpt_paths = get_all_ckpts(work_dir, steps) + if len(ckpt_paths) > 0: + last_ckpt_path = ckpt_paths[0] + checkpoint = torch.load(last_ckpt_path, map_location='cpu') + logging.info(f'load module from checkpoint: {last_ckpt_path}') + return checkpoint, last_ckpt_path + + +def get_all_ckpts(work_dir, steps=None): + if steps is None: + ckpt_path_pattern = f'{work_dir}/model_ckpt_steps_*.ckpt' + else: + ckpt_path_pattern = f'{work_dir}/model_ckpt_steps_{steps}.ckpt' + return sorted(glob.glob(ckpt_path_pattern), + key=lambda x: -int(re.findall('.*steps\_(\d+)\.ckpt', x)[0])) + + +def load_ckpt(cur_model, ckpt_base_dir, model_name='model', force=True, strict=True): + if os.path.isfile(ckpt_base_dir): + base_dir = os.path.dirname(ckpt_base_dir) + ckpt_path = ckpt_base_dir + checkpoint = torch.load(ckpt_base_dir, map_location='cpu') + else: + base_dir = ckpt_base_dir + checkpoint, ckpt_path = get_last_checkpoint(ckpt_base_dir) + if checkpoint is not None: + state_dict = checkpoint["state_dict"] + if len([k for k in state_dict.keys() if '.' 
in k]) > 0: + state_dict = {k[len(model_name) + 1:]: v for k, v in state_dict.items() + if k.startswith(f'{model_name}.')} + else: + if '.' not in model_name: + state_dict = state_dict[model_name] + else: + base_model_name = model_name.split('.')[0] + rest_model_name = model_name[len(base_model_name) + 1:] + state_dict = { + k[len(rest_model_name) + 1:]: v for k, v in state_dict[base_model_name].items() + if k.startswith(f'{rest_model_name}.')} + if not strict: + cur_model_state_dict = cur_model.state_dict() + unmatched_keys = [] + for key, param in state_dict.items(): + if key in cur_model_state_dict: + new_param = cur_model_state_dict[key] + if new_param.shape != param.shape: + unmatched_keys.append(key) + print("| Unmatched keys: ", key, new_param.shape, param.shape) + for key in unmatched_keys: + del state_dict[key] + cur_model.load_state_dict(state_dict, strict=strict) + print(f"| load '{model_name}' from '{ckpt_path}'.") + else: + e_msg = f"| ckpt not found in {base_dir}." + if force: + assert False, e_msg + else: + print(e_msg) diff --git a/NeuralSeq/utils/cwt.py b/NeuralSeq/utils/cwt.py new file mode 100644 index 0000000000000000000000000000000000000000..1a08461b9e422aac614438e6240b7355b8e4bb2c --- /dev/null +++ b/NeuralSeq/utils/cwt.py @@ -0,0 +1,146 @@ +import librosa +import numpy as np +from pycwt import wavelet +from scipy.interpolate import interp1d + + +def load_wav(wav_file, sr): + wav, _ = librosa.load(wav_file, sr=sr, mono=True) + return wav + + +def convert_continuos_f0(f0): + '''CONVERT F0 TO CONTINUOUS F0 + Args: + f0 (ndarray): original f0 sequence with the shape (T) + Return: + (ndarray): continuous f0 with the shape (T) + ''' + # get uv information as binary + f0 = np.copy(f0) + uv = np.float32(f0 != 0) + + # get start and end of f0 + if (f0 == 0).all(): + print("| all of the f0 values are 0.") + return uv, f0 + start_f0 = f0[f0 != 0][0] + end_f0 = f0[f0 != 0][-1] + + # padding start and end of f0 sequence + start_idx = np.where(f0 == start_f0)[0][0] + end_idx = np.where(f0 == end_f0)[0][-1] + f0[:start_idx] = start_f0 + f0[end_idx:] = end_f0 + + # get non-zero frame index + nz_frames = np.where(f0 != 0)[0] + + # perform linear interpolation + f = interp1d(nz_frames, f0[nz_frames]) + cont_f0 = f(np.arange(0, f0.shape[0])) + + return uv, cont_f0 + + +def get_cont_lf0(f0, frame_period=5.0): + uv, cont_f0_lpf = convert_continuos_f0(f0) + # cont_f0_lpf = low_pass_filter(cont_f0_lpf, int(1.0 / (frame_period * 0.001)), cutoff=20) + cont_lf0_lpf = np.log(cont_f0_lpf) + return uv, cont_lf0_lpf + + +def get_lf0_cwt(lf0): + ''' + input: + signal of shape (N) + output: + Wavelet_lf0 of shape(10, N), scales of shape(10) + ''' + mother = wavelet.MexicanHat() + dt = 0.005 + dj = 1 + s0 = dt * 2 + J = 9 + + Wavelet_lf0, scales, _, _, _, _ = wavelet.cwt(np.squeeze(lf0), dt, dj, s0, J, mother) + # Wavelet.shape => (J + 1, len(lf0)) + Wavelet_lf0 = np.real(Wavelet_lf0).T + return Wavelet_lf0, scales + + +def norm_scale(Wavelet_lf0): + Wavelet_lf0_norm = np.zeros((Wavelet_lf0.shape[0], Wavelet_lf0.shape[1])) + mean = Wavelet_lf0.mean(0)[None, :] + std = Wavelet_lf0.std(0)[None, :] + Wavelet_lf0_norm = (Wavelet_lf0 - mean) / std + return Wavelet_lf0_norm, mean, std + + +def normalize_cwt_lf0(f0, mean, std): + uv, cont_lf0_lpf = get_cont_lf0(f0) + cont_lf0_norm = (cont_lf0_lpf - mean) / std + Wavelet_lf0, scales = get_lf0_cwt(cont_lf0_norm) + Wavelet_lf0_norm, _, _ = norm_scale(Wavelet_lf0) + + return Wavelet_lf0_norm + + +def get_lf0_cwt_norm(f0s, mean, std): + uvs = list() + 
cont_lf0_lpfs = list() + cont_lf0_lpf_norms = list() + Wavelet_lf0s = list() + Wavelet_lf0s_norm = list() + scaless = list() + + means = list() + stds = list() + for f0 in f0s: + uv, cont_lf0_lpf = get_cont_lf0(f0) + cont_lf0_lpf_norm = (cont_lf0_lpf - mean) / std + + Wavelet_lf0, scales = get_lf0_cwt(cont_lf0_lpf_norm) # [560,10] + Wavelet_lf0_norm, mean_scale, std_scale = norm_scale(Wavelet_lf0) # [560,10],[1,10],[1,10] + + Wavelet_lf0s_norm.append(Wavelet_lf0_norm) + uvs.append(uv) + cont_lf0_lpfs.append(cont_lf0_lpf) + cont_lf0_lpf_norms.append(cont_lf0_lpf_norm) + Wavelet_lf0s.append(Wavelet_lf0) + scaless.append(scales) + means.append(mean_scale) + stds.append(std_scale) + + return Wavelet_lf0s_norm, scaless, means, stds + + +def inverse_cwt_torch(Wavelet_lf0, scales): + import torch + b = ((torch.arange(0, len(scales)).float().to(Wavelet_lf0.device)[None, None, :] + 1 + 2.5) ** (-2.5)) + lf0_rec = Wavelet_lf0 * b + lf0_rec_sum = lf0_rec.sum(-1) + lf0_rec_sum = (lf0_rec_sum - lf0_rec_sum.mean(-1, keepdim=True)) / lf0_rec_sum.std(-1, keepdim=True) + return lf0_rec_sum + + +def inverse_cwt(Wavelet_lf0, scales): + b = ((np.arange(0, len(scales))[None, None, :] + 1 + 2.5) ** (-2.5)) + lf0_rec = Wavelet_lf0 * b + lf0_rec_sum = lf0_rec.sum(-1) + lf0_rec_sum = (lf0_rec_sum - lf0_rec_sum.mean(-1, keepdims=True)) / lf0_rec_sum.std(-1, keepdims=True) + return lf0_rec_sum + + +def cwt2f0(cwt_spec, mean, std, cwt_scales): + assert len(mean.shape) == 1 and len(std.shape) == 1 and len(cwt_spec.shape) == 3 + import torch + if isinstance(cwt_spec, torch.Tensor): + f0 = inverse_cwt_torch(cwt_spec, cwt_scales) + f0 = f0 * std[:, None] + mean[:, None] + f0 = f0.exp() # [B, T] + else: + f0 = inverse_cwt(cwt_spec, cwt_scales) + f0 = f0 * std[:, None] + mean[:, None] + f0 = np.exp(f0) # [B, T] + return f0 diff --git a/NeuralSeq/utils/dtw.py b/NeuralSeq/utils/dtw.py new file mode 100644 index 0000000000000000000000000000000000000000..464c4b747d792d23cf413675a47c9dddf67da134 --- /dev/null +++ b/NeuralSeq/utils/dtw.py @@ -0,0 +1,162 @@ +from numpy import array, zeros, full, argmin, inf, ndim +from scipy.spatial.distance import cdist +from math import isinf + + +def dtw(x, y, dist, warp=1, w=inf, s=1.0): + """ + Computes Dynamic Time Warping (DTW) of two sequences. + + :param array x: N1*M array + :param array y: N2*M array + :param func dist: distance used as cost measure + :param int warp: how many shifts are computed. + :param int w: window size limiting the maximal distance between indices of matched entries |i,j|. + :param float s: weight applied on off-diagonal moves of the path. As s gets larger, the warping path is increasingly biased towards the diagonal + Returns the minimum distance, the cost matrix, the accumulated cost matrix, and the wrap path. 
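+
+    Note: this is the classic O(len(x) * len(y)) dynamic program evaluated with a
+    Python-level double loop; accelerated_dtw below builds the full cost matrix
+    with a single vectorized cdist call instead.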
+ """ + assert len(x) + assert len(y) + assert isinf(w) or (w >= abs(len(x) - len(y))) + assert s > 0 + r, c = len(x), len(y) + if not isinf(w): + D0 = full((r + 1, c + 1), inf) + for i in range(1, r + 1): + D0[i, max(1, i - w):min(c + 1, i + w + 1)] = 0 + D0[0, 0] = 0 + else: + D0 = zeros((r + 1, c + 1)) + D0[0, 1:] = inf + D0[1:, 0] = inf + D1 = D0[1:, 1:] # view + for i in range(r): + for j in range(c): + if (isinf(w) or (max(0, i - w) <= j <= min(c, i + w))): + D1[i, j] = dist(x[i], y[j]) + C = D1.copy() + jrange = range(c) + for i in range(r): + if not isinf(w): + jrange = range(max(0, i - w), min(c, i + w + 1)) + for j in jrange: + min_list = [D0[i, j]] + for k in range(1, warp + 1): + i_k = min(i + k, r) + j_k = min(j + k, c) + min_list += [D0[i_k, j] * s, D0[i, j_k] * s] + D1[i, j] += min(min_list) + if len(x) == 1: + path = zeros(len(y)), range(len(y)) + elif len(y) == 1: + path = range(len(x)), zeros(len(x)) + else: + path = _traceback(D0) + return D1[-1, -1], C, D1, path + + +def accelerated_dtw(x, y, dist, warp=1): + """ + Computes Dynamic Time Warping (DTW) of two sequences in a faster way. + Instead of iterating through each element and calculating each distance, + this uses the cdist function from scipy (https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.cdist.html) + + :param array x: N1*M array + :param array y: N2*M array + :param string or func dist: distance parameter for cdist. When string is given, cdist uses optimized functions for the distance metrics. + If a string is passed, the distance function can be 'braycurtis', 'canberra', 'chebyshev', 'cityblock', 'correlation', 'cosine', 'dice', 'euclidean', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'wminkowski', 'yule'. + :param int warp: how many shifts are computed. + Returns the minimum distance, the cost matrix, the accumulated cost matrix, and the wrap path. 
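+
+    Example (illustrative):
+        d, cost, acc, path = accelerated_dtw(x, y, dist='euclidean')
+        where x is an (N1, M) array and y an (N2, M) array; `path` is a pair of
+        index arrays describing the optimal alignment.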
+ """ + assert len(x) + assert len(y) + if ndim(x) == 1: + x = x.reshape(-1, 1) + if ndim(y) == 1: + y = y.reshape(-1, 1) + r, c = len(x), len(y) + D0 = zeros((r + 1, c + 1)) + D0[0, 1:] = inf + D0[1:, 0] = inf + D1 = D0[1:, 1:] + D0[1:, 1:] = cdist(x, y, dist) + C = D1.copy() + for i in range(r): + for j in range(c): + min_list = [D0[i, j]] + for k in range(1, warp + 1): + min_list += [D0[min(i + k, r), j], + D0[i, min(j + k, c)]] + D1[i, j] += min(min_list) + if len(x) == 1: + path = zeros(len(y)), range(len(y)) + elif len(y) == 1: + path = range(len(x)), zeros(len(x)) + else: + path = _traceback(D0) + return D1[-1, -1], C, D1, path + + +def _traceback(D): + i, j = array(D.shape) - 2 + p, q = [i], [j] + while (i > 0) or (j > 0): + tb = argmin((D[i, j], D[i, j + 1], D[i + 1, j])) + if tb == 0: + i -= 1 + j -= 1 + elif tb == 1: + i -= 1 + else: # (tb == 2): + j -= 1 + p.insert(0, i) + q.insert(0, j) + return array(p), array(q) + + +if __name__ == '__main__': + w = inf + s = 1.0 + if 1: # 1-D numeric + from sklearn.metrics.pairwise import manhattan_distances + import numpy as np + x = [0, 0, 1, 1, 2, 4, 2, 1, 2, 0] + x = np.array(x).reshape([-1,1,1]) + y = [1, 1, 1, 2, 2, 2, 2, 3, 2, 0] + y = np.array(y).reshape([-1,1,1]) + dist_fun = manhattan_distances + w = 1 + # s = 1.2 + elif 0: # 2-D numeric + from sklearn.metrics.pairwise import euclidean_distances + + x = [[0, 0], [0, 1], [1, 1], [1, 2], [2, 2], [4, 3], [2, 3], [1, 1], [2, 2], [0, 1]] + y = [[1, 0], [1, 1], [1, 1], [2, 1], [4, 3], [4, 3], [2, 3], [3, 1], [1, 2], [1, 0]] + dist_fun = euclidean_distances + else: # 1-D list of strings + from nltk.metrics.distance import edit_distance + + # x = ['we', 'shelled', 'clams', 'for', 'the', 'chowder'] + # y = ['class', 'too'] + x = ['i', 'soon', 'found', 'myself', 'muttering', 'to', 'the', 'walls'] + y = ['see', 'drown', 'himself'] + # x = 'we talked about the situation'.split() + # y = 'we talked about the situation'.split() + dist_fun = edit_distance + dist, cost, acc, path = dtw(x, y, dist_fun, w=w, s=s) + + # Vizualize + from matplotlib import pyplot as plt + + plt.imshow(cost.T, origin='lower', cmap=plt.cm.Reds, interpolation='nearest') + plt.plot(path[0], path[1], '-o') # relation + plt.xticks(range(len(x)), x) + plt.yticks(range(len(y)), y) + plt.xlabel('x') + plt.ylabel('y') + plt.axis('tight') + if isinf(w): + plt.title('Minimum distance: {}, slope weight: {}'.format(dist, s)) + else: + plt.title('Minimum distance: {}, window widht: {}, slope weight: {}'.format(dist, w, s)) + plt.show() diff --git a/NeuralSeq/utils/hparams.py b/NeuralSeq/utils/hparams.py new file mode 100644 index 0000000000000000000000000000000000000000..c76c5cfc896308d9a84c6254a7ca00b8235e7516 --- /dev/null +++ b/NeuralSeq/utils/hparams.py @@ -0,0 +1,129 @@ +import argparse +import os +import yaml + +global_print_hparams = True +hparams = {} + + +class Args: + def __init__(self, **kwargs): + for k, v in kwargs.items(): + self.__setattr__(k, v) + + +def override_config(old_config: dict, new_config: dict): + for k, v in new_config.items(): + if isinstance(v, dict) and k in old_config: + override_config(old_config[k], new_config[k]) + else: + old_config[k] = v + + +def set_hparams(config='', exp_name='', hparams_str='', print_hparams=True, global_hparams=True): + if config == '' and exp_name == '': + parser = argparse.ArgumentParser(description='') + parser.add_argument('--config', type=str, default='', + help='location of the data corpus') + parser.add_argument('--exp_name', type=str, default='', help='exp_name') + 
parser.add_argument('-hp', '--hparams', type=str, default='', + help='location of the data corpus') + parser.add_argument('--infer', action='store_true', help='infer') + parser.add_argument('--validate', action='store_true', help='validate') + parser.add_argument('--reset', action='store_true', help='reset hparams') + parser.add_argument('--remove', action='store_true', help='remove old ckpt') + parser.add_argument('--debug', action='store_true', help='debug') + args, unknown = parser.parse_known_args() + print("| Unknow hparams: ", unknown) + else: + args = Args(config=config, exp_name=exp_name, hparams=hparams_str, + infer=False, validate=False, reset=False, debug=False, remove=False) + global hparams + assert args.config != '' or args.exp_name != '' + if args.config != '': + assert os.path.exists(args.config) + + config_chains = [] + loaded_config = set() + + def load_config(config_fn): + # deep first inheritance and avoid the second visit of one node + if not os.path.exists(config_fn): + return {} + with open(config_fn) as f: + hparams_ = yaml.safe_load(f) + loaded_config.add(config_fn) + if 'base_config' in hparams_: + ret_hparams = {} + if not isinstance(hparams_['base_config'], list): + hparams_['base_config'] = [hparams_['base_config']] + for c in hparams_['base_config']: + if c.startswith('.'): + c = f'{os.path.dirname(config_fn)}/{c}' + c = os.path.normpath(c) + if c not in loaded_config: + override_config(ret_hparams, load_config(c)) + override_config(ret_hparams, hparams_) + else: + ret_hparams = hparams_ + config_chains.append(config_fn) + return ret_hparams + + saved_hparams = {} + args_work_dir = '' + if args.exp_name != '': + args_work_dir = f'{args.exp_name}' # modified + ckpt_config_path = f'{args_work_dir}/config.yaml' + if os.path.exists(ckpt_config_path): + with open(ckpt_config_path) as f: + saved_hparams_ = yaml.safe_load(f) + if saved_hparams_ is not None: + saved_hparams.update(saved_hparams_) + hparams_ = {} + if args.config != '': + hparams_.update(load_config(args.config)) + if not args.reset: + hparams_.update(saved_hparams) + hparams_['work_dir'] = args_work_dir + + # Support config overriding in command line. Support list type config overriding. + # Examples: --hparams="a=1,b.c=2,d=[1 1 1]" + if args.hparams != "": + for new_hparam in args.hparams.split(","): + k, v = new_hparam.split("=") + v = v.strip("\'\" ") + config_node = hparams_ + for k_ in k.split(".")[:-1]: + config_node = config_node[k_] + k = k.split(".")[-1] + if v in ['True', 'False'] or type(config_node[k]) in [bool, list, dict]: + if type(config_node[k]) == list: + v = v.replace(" ", ",") + config_node[k] = eval(v) + else: + config_node[k] = type(config_node[k])(v) + if args_work_dir != '' and args.remove: + answer = input("REMOVE old checkpoint? 
Y/N [Default: N]: ") + if answer.lower() == "y": + remove_file(args_work_dir) + if args_work_dir != '' and (not os.path.exists(ckpt_config_path) or args.reset) and not args.infer: + os.makedirs(hparams_['work_dir'], exist_ok=True) + with open(ckpt_config_path, 'w') as f: + yaml.safe_dump(hparams_, f) + + hparams_['infer'] = args.infer + hparams_['debug'] = args.debug + hparams_['validate'] = args.validate + hparams_['exp_name'] = args.exp_name + global global_print_hparams + if global_hparams: + hparams.clear() + hparams.update(hparams_) + if print_hparams and global_print_hparams and global_hparams: + print('| Hparams chains: ', config_chains) + print('| Hparams: ') + for i, (k, v) in enumerate(sorted(hparams_.items())): + print(f"\033[;33;m{k}\033[0m: {v}, ", end="\n" if i % 5 == 4 else "") + print("") + global_print_hparams = False + return hparams_ \ No newline at end of file diff --git a/NeuralSeq/utils/indexed_datasets.py b/NeuralSeq/utils/indexed_datasets.py new file mode 100644 index 0000000000000000000000000000000000000000..e15632be30d6296a3c9aa80a1f351058003698b3 --- /dev/null +++ b/NeuralSeq/utils/indexed_datasets.py @@ -0,0 +1,71 @@ +import pickle +from copy import deepcopy + +import numpy as np + + +class IndexedDataset: + def __init__(self, path, num_cache=1): + super().__init__() + self.path = path + self.data_file = None + self.data_offsets = np.load(f"{path}.idx", allow_pickle=True).item()['offsets'] + self.data_file = open(f"{path}.data", 'rb', buffering=-1) + self.cache = [] + self.num_cache = num_cache + + def check_index(self, i): + if i < 0 or i >= len(self.data_offsets) - 1: + raise IndexError('index out of range') + + def __del__(self): + if self.data_file: + self.data_file.close() + + def __getitem__(self, i): + self.check_index(i) + if self.num_cache > 0: + for c in self.cache: + if c[0] == i: + return c[1] + self.data_file.seek(self.data_offsets[i]) + b = self.data_file.read(self.data_offsets[i + 1] - self.data_offsets[i]) + item = pickle.loads(b) + if self.num_cache > 0: + self.cache = [(i, deepcopy(item))] + self.cache[:-1] + return item + + def __len__(self): + return len(self.data_offsets) - 1 + +class IndexedDatasetBuilder: + def __init__(self, path): + self.path = path + self.out_file = open(f"{path}.data", 'wb') + self.byte_offsets = [0] + + def add_item(self, item): + s = pickle.dumps(item) + bytes = self.out_file.write(s) + self.byte_offsets.append(self.byte_offsets[-1] + bytes) + + def finalize(self): + self.out_file.close() + np.save(open(f"{self.path}.idx", 'wb'), {'offsets': self.byte_offsets}) + + +if __name__ == "__main__": + import random + from tqdm import tqdm + ds_path = '/tmp/indexed_ds_example' + size = 100 + items = [{"a": np.random.normal(size=[10000, 10]), + "b": np.random.normal(size=[10000, 10])} for i in range(size)] + builder = IndexedDatasetBuilder(ds_path) + for i in tqdm(range(size)): + builder.add_item(items[i]) + builder.finalize() + ds = IndexedDataset(ds_path) + for i in tqdm(range(10000)): + idx = random.randint(0, size - 1) + assert (ds[idx]['a'] == items[idx]['a']).all() diff --git a/NeuralSeq/utils/multiprocess_utils.py b/NeuralSeq/utils/multiprocess_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..e7ed3c91aeee1103213fb573806423a9e1aef097 --- /dev/null +++ b/NeuralSeq/utils/multiprocess_utils.py @@ -0,0 +1,54 @@ +import os +import traceback +from multiprocessing import Queue, Process + + +def chunked_worker(worker_id, map_func, args, results_queue=None, init_ctx_func=None): + ctx = 
init_ctx_func(worker_id) if init_ctx_func is not None else None + for job_idx, arg in args: + try: + if ctx is not None: + res = map_func(*arg, ctx=ctx) + else: + res = map_func(*arg) + results_queue.put((job_idx, res)) + except: + traceback.print_exc() + results_queue.put((job_idx, None)) + +def chunked_multiprocess_run(map_func, args, num_workers=None, ordered=True, init_ctx_func=None, q_max_size=1000): + args = zip(range(len(args)), args) + args = list(args) + n_jobs = len(args) + if num_workers is None: + num_workers = int(os.getenv('N_PROC', os.cpu_count())) + results_queues = [] + if ordered: + for i in range(num_workers): + results_queues.append(Queue(maxsize=q_max_size // num_workers)) + else: + results_queue = Queue(maxsize=q_max_size) + for i in range(num_workers): + results_queues.append(results_queue) + workers = [] + for i in range(num_workers): + args_worker = args[i::num_workers] + p = Process(target=chunked_worker, args=( + i, map_func, args_worker, results_queues[i], init_ctx_func), daemon=True) + workers.append(p) + p.start() + for n_finished in range(n_jobs): + results_queue = results_queues[n_finished % num_workers] + job_idx, res = results_queue.get() + assert job_idx == n_finished or not ordered, (job_idx, n_finished) + yield res + for w in workers: + w.join() + w.close() + +def multiprocess_run_tqdm(map_func, args, num_workers=None, ordered=True, init_ctx_func=None, + multithread=False, desc=None): + for i, res in tqdm(enumerate( + multiprocess_run(map_func, args, num_workers, ordered, init_ctx_func, multithread)), + total=len(args), desc=desc): + yield i, res \ No newline at end of file diff --git a/NeuralSeq/utils/os_utils.py b/NeuralSeq/utils/os_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..c78a44c04eadc3feb3c35f88c8a074f59ab23778 --- /dev/null +++ b/NeuralSeq/utils/os_utils.py @@ -0,0 +1,20 @@ +import os +import subprocess + + +def link_file(from_file, to_file): + subprocess.check_call( + f'ln -s "`realpath --relative-to="{os.path.dirname(to_file)}" "{from_file}"`" "{to_file}"', shell=True) + + +def move_file(from_file, to_file): + subprocess.check_call(f'mv "{from_file}" "{to_file}"', shell=True) + + +def copy_file(from_file, to_file): + subprocess.check_call(f'cp -r "{from_file}" "{to_file}"', shell=True) + + +def remove_file(*fns): + for f in fns: + subprocess.check_call(f'rm -rf "{f}"', shell=True) \ No newline at end of file diff --git a/NeuralSeq/utils/pitch_utils.py b/NeuralSeq/utils/pitch_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..f7fd166abd3a03bac5909e498669b482447435cf --- /dev/null +++ b/NeuralSeq/utils/pitch_utils.py @@ -0,0 +1,76 @@ +######### +# world +########## +import librosa +import numpy as np +import torch + +gamma = 0 +mcepInput = 3 # 0 for dB, 3 for magnitude +alpha = 0.45 +en_floor = 10 ** (-80 / 20) +FFT_SIZE = 2048 + + +f0_bin = 256 +f0_max = 1100.0 +f0_min = 50.0 +f0_mel_min = 1127 * np.log(1 + f0_min / 700) +f0_mel_max = 1127 * np.log(1 + f0_max / 700) + + +def f0_to_coarse(f0): + is_torch = isinstance(f0, torch.Tensor) + f0_mel = 1127 * (1 + f0 / 700).log() if is_torch else 1127 * np.log(1 + f0 / 700) + f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * (f0_bin - 2) / (f0_mel_max - f0_mel_min) + 1 + + f0_mel[f0_mel <= 1] = 1 + f0_mel[f0_mel > f0_bin - 1] = f0_bin - 1 + f0_coarse = (f0_mel + 0.5).long() if is_torch else np.rint(f0_mel).astype(np.int) + assert f0_coarse.max() <= 255 and f0_coarse.min() >= 1, (f0_coarse.max(), f0_coarse.min()) + return f0_coarse + + 
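+# Illustrative usage (a sketch; assumes a NumPy f0 contour in Hz, with 0 marking
+# unvoiced frames): f0_to_coarse maps pitch onto 255 mel-spaced bins so it can be
+# fed to an embedding table, and unvoiced frames land in bin 1.
+#   f0 = np.array([0.0, 110.0, 220.0, 440.0])
+#   coarse = f0_to_coarse(f0)   # integer bins in [1, 255]
+# Note: `np.int` above was removed in NumPy >= 1.24; plain `int` is the drop-in
+# replacement on recent NumPy versions.
+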
+def norm_f0(f0, uv, hparams): + is_torch = isinstance(f0, torch.Tensor) + if hparams['pitch_norm'] == 'standard': + f0 = (f0 - hparams['f0_mean']) / hparams['f0_std'] + if hparams['pitch_norm'] == 'log': + f0 = torch.log2(f0) if is_torch else np.log2(f0) + if uv is not None and hparams['use_uv']: + f0[uv > 0] = 0 + return f0 + + +def norm_interp_f0(f0, hparams): + is_torch = isinstance(f0, torch.Tensor) + if is_torch: + device = f0.device + f0 = f0.data.cpu().numpy() + uv = f0 == 0 + f0 = norm_f0(f0, uv, hparams) + if sum(uv) == len(f0): + f0[uv] = 0 + elif sum(uv) > 0: + f0[uv] = np.interp(np.where(uv)[0], np.where(~uv)[0], f0[~uv]) + uv = torch.FloatTensor(uv) + f0 = torch.FloatTensor(f0) + if is_torch: + f0 = f0.to(device) + return f0, uv + + +def denorm_f0(f0, uv, hparams, pitch_padding=None, min=None, max=None): + if hparams['pitch_norm'] == 'standard': + f0 = f0 * hparams['f0_std'] + hparams['f0_mean'] + if hparams['pitch_norm'] == 'log': + f0 = 2 ** f0 + if min is not None: + f0 = f0.clamp(min=min) + if max is not None: + f0 = f0.clamp(max=max) + if uv is not None and hparams['use_uv']: + f0[uv > 0] = 0 + if pitch_padding is not None: + f0[pitch_padding] = 0 + return f0 diff --git a/NeuralSeq/utils/pl_utils.py b/NeuralSeq/utils/pl_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..76a94ed6abe22e349c51c49afdbf052d52b8d98b --- /dev/null +++ b/NeuralSeq/utils/pl_utils.py @@ -0,0 +1,1618 @@ +import matplotlib +from torch.nn import DataParallel +from torch.nn.parallel import DistributedDataParallel + +matplotlib.use('Agg') +import glob +import itertools +import subprocess +import threading +import traceback + +from pytorch_lightning.callbacks import GradientAccumulationScheduler +from pytorch_lightning.callbacks import ModelCheckpoint + +from functools import wraps +from torch.cuda._utils import _get_device_index +import numpy as np +import torch.optim +import torch.utils.data +import copy +import logging +import os +import re +import sys +import torch +import torch.distributed as dist +import torch.multiprocessing as mp +import tqdm +from torch.optim.optimizer import Optimizer + + +def get_a_var(obj): # pragma: no cover + if isinstance(obj, torch.Tensor): + return obj + + if isinstance(obj, list) or isinstance(obj, tuple): + for result in map(get_a_var, obj): + if isinstance(result, torch.Tensor): + return result + if isinstance(obj, dict): + for result in map(get_a_var, obj.items()): + if isinstance(result, torch.Tensor): + return result + return None + + +def data_loader(fn): + """ + Decorator to make any fx with this use the lazy property + :param fn: + :return: + """ + + wraps(fn) + attr_name = '_lazy_' + fn.__name__ + + def _get_data_loader(self): + try: + value = getattr(self, attr_name) + except AttributeError: + try: + value = fn(self) # Lazy evaluation, done only once. + if ( + value is not None and + not isinstance(value, list) and + fn.__name__ in ['test_dataloader', 'val_dataloader'] + ): + value = [value] + except AttributeError as e: + # Guard against AttributeError suppression. (Issue #142) + traceback.print_exc() + error = f'{fn.__name__}: An AttributeError was encountered: ' + str(e) + raise RuntimeError(error) from e + setattr(self, attr_name, value) # Memoize evaluation. 
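+            # val/test dataloaders were wrapped in a list above, so callers can always
+            # iterate over a collection of loaders regardless of how many were defined.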
+ return value + + return _get_data_loader + + +def parallel_apply(modules, inputs, kwargs_tup=None, devices=None): # pragma: no cover + r"""Applies each `module` in :attr:`modules` in parallel on arguments + contained in :attr:`inputs` (positional) and :attr:`kwargs_tup` (keyword) + on each of :attr:`devices`. + + Args: + modules (Module): modules to be parallelized + inputs (tensor): inputs to the modules + devices (list of int or torch.device): CUDA devices + + :attr:`modules`, :attr:`inputs`, :attr:`kwargs_tup` (if given), and + :attr:`devices` (if given) should all have same length. Moreover, each + element of :attr:`inputs` can either be a single object as the only argument + to a module, or a collection of positional arguments. + """ + assert len(modules) == len(inputs) + if kwargs_tup is not None: + assert len(modules) == len(kwargs_tup) + else: + kwargs_tup = ({},) * len(modules) + if devices is not None: + assert len(modules) == len(devices) + else: + devices = [None] * len(modules) + devices = list(map(lambda x: _get_device_index(x, True), devices)) + lock = threading.Lock() + results = {} + grad_enabled = torch.is_grad_enabled() + + def _worker(i, module, input, kwargs, device=None): + torch.set_grad_enabled(grad_enabled) + if device is None: + device = get_a_var(input).get_device() + try: + with torch.cuda.device(device): + # this also avoids accidental slicing of `input` if it is a Tensor + if not isinstance(input, (list, tuple)): + input = (input,) + + # --------------- + # CHANGE + if module.training: + output = module.training_step(*input, **kwargs) + + elif module.testing: + output = module.test_step(*input, **kwargs) + + else: + output = module.validation_step(*input, **kwargs) + # --------------- + + with lock: + results[i] = output + except Exception as e: + with lock: + results[i] = e + + # make sure each module knows what training state it's in... + # fixes weird bug where copies are out of sync + root_m = modules[0] + for m in modules[1:]: + m.training = root_m.training + m.testing = root_m.testing + + if len(modules) > 1: + threads = [threading.Thread(target=_worker, + args=(i, module, input, kwargs, device)) + for i, (module, input, kwargs, device) in + enumerate(zip(modules, inputs, kwargs_tup, devices))] + + for thread in threads: + thread.start() + for thread in threads: + thread.join() + else: + _worker(0, modules[0], inputs[0], kwargs_tup[0], devices[0]) + + outputs = [] + for i in range(len(inputs)): + output = results[i] + if isinstance(output, Exception): + raise output + outputs.append(output) + return outputs + + +def _find_tensors(obj): # pragma: no cover + r""" + Recursively find all tensors contained in the specified object. 
+ """ + if isinstance(obj, torch.Tensor): + return [obj] + if isinstance(obj, (list, tuple)): + return itertools.chain(*map(_find_tensors, obj)) + if isinstance(obj, dict): + return itertools.chain(*map(_find_tensors, obj.values())) + return [] + + +class DDP(DistributedDataParallel): + """ + Override the forward call in lightning so it goes to training and validation step respectively + """ + + def parallel_apply(self, replicas, inputs, kwargs): + return parallel_apply(replicas, inputs, kwargs, self.device_ids[:len(replicas)]) + + def forward(self, *inputs, **kwargs): # pragma: no cover + self._sync_params() + if self.device_ids: + inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids) + if len(self.device_ids) == 1: + # -------------- + # LIGHTNING MOD + # -------------- + # normal + # output = self.module(*inputs[0], **kwargs[0]) + # lightning + if self.module.training: + output = self.module.training_step(*inputs[0], **kwargs[0]) + elif self.module.testing: + output = self.module.test_step(*inputs[0], **kwargs[0]) + else: + output = self.module.validation_step(*inputs[0], **kwargs[0]) + else: + outputs = self.parallel_apply(self._module_copies[:len(inputs)], inputs, kwargs) + output = self.gather(outputs, self.output_device) + else: + # normal + output = self.module(*inputs, **kwargs) + + if torch.is_grad_enabled(): + # We'll return the output object verbatim since it is a freeform + # object. We need to find any tensors in this object, though, + # because we need to figure out which parameters were used during + # this forward pass, to ensure we short circuit reduction for any + # unused parameters. Only if `find_unused_parameters` is set. + if self.find_unused_parameters: + self.reducer.prepare_for_backward(list(_find_tensors(output))) + else: + self.reducer.prepare_for_backward([]) + return output + + +class DP(DataParallel): + """ + Override the forward call in lightning so it goes to training and validation step respectively + """ + + def forward(self, *inputs, **kwargs): + if not self.device_ids: + return self.module(*inputs, **kwargs) + + for t in itertools.chain(self.module.parameters(), self.module.buffers()): + if t.device != self.src_device_obj: + raise RuntimeError("module must have its parameters and buffers " + "on device {} (device_ids[0]) but found one of " + "them on device: {}".format(self.src_device_obj, t.device)) + + inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids) + if len(self.device_ids) == 1: + # lightning + if self.module.training: + return self.module.training_step(*inputs[0], **kwargs[0]) + elif self.module.testing: + return self.module.test_step(*inputs[0], **kwargs[0]) + else: + return self.module.validation_step(*inputs[0], **kwargs[0]) + + replicas = self.replicate(self.module, self.device_ids[:len(inputs)]) + outputs = self.parallel_apply(replicas, inputs, kwargs) + return self.gather(outputs, self.output_device) + + def parallel_apply(self, replicas, inputs, kwargs): + return parallel_apply(replicas, inputs, kwargs, self.device_ids[:len(replicas)]) + + +class GradientAccumulationScheduler: + def __init__(self, scheduling: dict): + if scheduling == {}: # empty dict error + raise TypeError("Empty dict cannot be interpreted correct") + + for key in scheduling.keys(): + if not isinstance(key, int) or not isinstance(scheduling[key], int): + raise TypeError("All epoches and accumulation factor must be integers") + + minimal_epoch = min(scheduling.keys()) + if minimal_epoch < 1: + msg = f"Epochs indexing from 1, epoch {minimal_epoch} 
cannot be interpreted correct" + raise IndexError(msg) + elif minimal_epoch != 1: # if user didnt define first epoch accumulation factor + scheduling.update({1: 1}) + + self.scheduling = scheduling + self.epochs = sorted(scheduling.keys()) + + def on_epoch_begin(self, epoch, trainer): + epoch += 1 # indexing epochs from 1 + for i in reversed(range(len(self.epochs))): + if epoch >= self.epochs[i]: + trainer.accumulate_grad_batches = self.scheduling.get(self.epochs[i]) + break + + +class LatestModelCheckpoint(ModelCheckpoint): + def __init__(self, filepath, monitor='val_loss', verbose=0, num_ckpt_keep=5, + save_weights_only=False, mode='auto', period=1, prefix='model', save_best=True): + super(ModelCheckpoint, self).__init__() + self.monitor = monitor + self.verbose = verbose + self.filepath = filepath + os.makedirs(filepath, exist_ok=True) + self.num_ckpt_keep = num_ckpt_keep + self.save_best = save_best + self.save_weights_only = save_weights_only + self.period = period + self.epochs_since_last_check = 0 + self.prefix = prefix + self.best_k_models = {} + # {filename: monitor} + self.kth_best_model = '' + self.save_top_k = 1 + self.task = None + if mode == 'min': + self.monitor_op = np.less + self.best = np.Inf + self.mode = 'min' + elif mode == 'max': + self.monitor_op = np.greater + self.best = -np.Inf + self.mode = 'max' + else: + if 'acc' in self.monitor or self.monitor.startswith('fmeasure'): + self.monitor_op = np.greater + self.best = -np.Inf + self.mode = 'max' + else: + self.monitor_op = np.less + self.best = np.Inf + self.mode = 'min' + if os.path.exists(f'{self.filepath}/best_valid.npy'): + self.best = np.load(f'{self.filepath}/best_valid.npy')[0] + + def get_all_ckpts(self): + return sorted(glob.glob(f'{self.filepath}/{self.prefix}_ckpt_steps_*.ckpt'), + key=lambda x: -int(re.findall('.*steps\_(\d+)\.ckpt', x)[0])) + + def on_epoch_end(self, epoch, logs=None): + logs = logs or {} + self.epochs_since_last_check += 1 + best_filepath = f'{self.filepath}/{self.prefix}_ckpt_best.pt' + if self.epochs_since_last_check >= self.period: + self.epochs_since_last_check = 0 + filepath = f'{self.filepath}/{self.prefix}_ckpt_steps_{self.task.global_step}.ckpt' + if self.verbose > 0: + logging.info(f'Epoch {epoch:05d}@{self.task.global_step}: saving model to {filepath}') + self._save_model(filepath) + for old_ckpt in self.get_all_ckpts()[self.num_ckpt_keep:]: + subprocess.check_call(f'rm -rf "{old_ckpt}"', shell=True) + if self.verbose > 0: + logging.info(f'Delete ckpt: {os.path.basename(old_ckpt)}') + current = logs.get(self.monitor) + if current is not None and self.save_best: + if self.monitor_op(current, self.best): + self.best = current + if self.verbose > 0: + logging.info( + f'Epoch {epoch:05d}@{self.task.global_step}: {self.monitor} reached' + f' {current:0.5f} (best {self.best:0.5f}), saving model to' + f' {best_filepath} as top 1') + self._save_model(best_filepath) + np.save(f'{self.filepath}/best_valid.npy', [self.best]) + + +class BaseTrainer: + def __init__( + self, + logger=True, + checkpoint_callback=True, + default_save_path=None, + gradient_clip_val=0, + process_position=0, + gpus=-1, + log_gpu_memory=None, + show_progress_bar=True, + track_grad_norm=-1, + check_val_every_n_epoch=1, + accumulate_grad_batches=1, + max_updates=1000, + min_epochs=1, + val_check_interval=1.0, + log_save_interval=100, + row_log_interval=10, + print_nan_grads=False, + weights_summary='full', + num_sanity_val_steps=5, + resume_from_checkpoint=None, + ): + self.log_gpu_memory = log_gpu_memory + 
self.gradient_clip_val = gradient_clip_val + self.check_val_every_n_epoch = check_val_every_n_epoch + self.track_grad_norm = track_grad_norm + self.on_gpu = True if (gpus and torch.cuda.is_available()) else False + self.process_position = process_position + self.weights_summary = weights_summary + self.max_updates = max_updates + self.min_epochs = min_epochs + self.num_sanity_val_steps = num_sanity_val_steps + self.print_nan_grads = print_nan_grads + self.resume_from_checkpoint = resume_from_checkpoint + self.default_save_path = default_save_path + + # training bookeeping + self.total_batch_idx = 0 + self.running_loss = [] + self.avg_loss = 0 + self.batch_idx = 0 + self.tqdm_metrics = {} + self.callback_metrics = {} + self.num_val_batches = 0 + self.num_training_batches = 0 + self.num_test_batches = 0 + self.get_train_dataloader = None + self.get_test_dataloaders = None + self.get_val_dataloaders = None + self.is_iterable_train_dataloader = False + + # training state + self.model = None + self.testing = False + self.disable_validation = False + self.lr_schedulers = [] + self.optimizers = None + self.global_step = 0 + self.current_epoch = 0 + self.total_batches = 0 + + # configure checkpoint callback + self.checkpoint_callback = checkpoint_callback + self.checkpoint_callback.save_function = self.save_checkpoint + self.weights_save_path = self.checkpoint_callback.filepath + + # accumulated grads + self.configure_accumulated_gradients(accumulate_grad_batches) + + # allow int, string and gpu list + self.data_parallel_device_ids = [ + int(x) for x in os.environ.get("CUDA_VISIBLE_DEVICES", "").split(",") if x != ''] + if len(self.data_parallel_device_ids) == 0: + self.root_gpu = None + self.on_gpu = False + else: + self.root_gpu = self.data_parallel_device_ids[0] + self.on_gpu = True + + # distributed backend choice + self.use_ddp = False + self.use_dp = False + self.single_gpu = False + self.distributed_backend = 'ddp' if self.num_gpus > 0 else 'dp' + self.set_distributed_mode(self.distributed_backend) + + self.proc_rank = 0 + self.world_size = 1 + self.node_rank = 0 + + # can't init progress bar here because starting a new process + # means the progress_bar won't survive pickling + self.show_progress_bar = show_progress_bar + + # logging + self.log_save_interval = log_save_interval + self.val_check_interval = val_check_interval + self.logger = logger + self.logger.rank = 0 + self.row_log_interval = row_log_interval + + @property + def num_gpus(self): + gpus = self.data_parallel_device_ids + if gpus is None: + return 0 + else: + return len(gpus) + + @property + def data_parallel(self): + return self.use_dp or self.use_ddp + + def get_model(self): + is_dp_module = isinstance(self.model, (DDP, DP)) + model = self.model.module if is_dp_module else self.model + return model + + # ----------------------------- + # MODEL TRAINING + # ----------------------------- + def fit(self, model): + if self.use_ddp: + mp.spawn(self.ddp_train, nprocs=self.num_gpus, args=(model,)) + else: + model.model = model.build_model() + if not self.testing: + self.optimizers, self.lr_schedulers = self.init_optimizers(model.configure_optimizers()) + if self.use_dp: + model.cuda(self.root_gpu) + model = DP(model, device_ids=self.data_parallel_device_ids) + elif self.single_gpu: + model.cuda(self.root_gpu) + self.run_pretrain_routine(model) + return 1 + + def init_optimizers(self, optimizers): + + # single optimizer + if isinstance(optimizers, Optimizer): + return [optimizers], [] + + # two lists + elif len(optimizers) == 2 
and isinstance(optimizers[0], list): + optimizers, lr_schedulers = optimizers + return optimizers, lr_schedulers + + # single list or tuple + elif isinstance(optimizers, list) or isinstance(optimizers, tuple): + return optimizers, [] + + def run_pretrain_routine(self, model): + """Sanity check a few things before starting actual training. + + :param model: + """ + ref_model = model + if self.data_parallel: + ref_model = model.module + + # give model convenience properties + ref_model.trainer = self + + # set local properties on the model + self.copy_trainer_model_properties(ref_model) + + # link up experiment object + if self.logger is not None: + ref_model.logger = self.logger + self.logger.save() + + if self.use_ddp: + dist.barrier() + + # set up checkpoint callback + # self.configure_checkpoint_callback() + + # transfer data loaders from model + self.get_dataloaders(ref_model) + + # track model now. + # if cluster resets state, the model will update with the saved weights + self.model = model + + # restore training and model before hpc call + self.restore_weights(model) + + # when testing requested only run test and return + if self.testing: + self.run_evaluation(test=True) + return + + # check if we should run validation during training + self.disable_validation = self.num_val_batches == 0 + + # run tiny validation (if validation defined) + # to make sure program won't crash during val + ref_model.on_sanity_check_start() + ref_model.on_train_start() + if not self.disable_validation and self.num_sanity_val_steps > 0: + # init progress bars for validation sanity check + pbar = tqdm.tqdm(desc='Validation sanity check', + total=self.num_sanity_val_steps * len(self.get_val_dataloaders()), + leave=False, position=2 * self.process_position, + disable=not self.show_progress_bar, dynamic_ncols=True, unit='batch') + self.main_progress_bar = pbar + # dummy validation progress bar + self.val_progress_bar = tqdm.tqdm(disable=True) + + self.evaluate(model, self.get_val_dataloaders(), self.num_sanity_val_steps, self.testing) + + # close progress bars + self.main_progress_bar.close() + self.val_progress_bar.close() + + # init progress bar + pbar = tqdm.tqdm(leave=True, position=2 * self.process_position, + disable=not self.show_progress_bar, dynamic_ncols=True, unit='batch', + file=sys.stdout) + self.main_progress_bar = pbar + + # clear cache before training + if self.on_gpu: + torch.cuda.empty_cache() + + # CORE TRAINING LOOP + self.train() + + def test(self, model): + self.testing = True + self.fit(model) + + @property + def training_tqdm_dict(self): + tqdm_dict = { + 'step': '{}'.format(self.global_step), + } + tqdm_dict.update(self.tqdm_metrics) + return tqdm_dict + + # -------------------- + # restore ckpt + # -------------------- + def restore_weights(self, model): + """ + To restore weights we have two cases. + First, attempt to restore hpc weights. If successful, don't restore + other weights. 
+ + Otherwise, try to restore actual weights + :param model: + :return: + """ + # clear cache before restore + if self.on_gpu: + torch.cuda.empty_cache() + + if self.resume_from_checkpoint is not None: + self.restore(self.resume_from_checkpoint, on_gpu=self.on_gpu) + else: + # restore weights if same exp version + self.restore_state_if_checkpoint_exists(model) + + # wait for all models to restore weights + if self.use_ddp: + # wait for all processes to catch up + dist.barrier() + + # clear cache after restore + if self.on_gpu: + torch.cuda.empty_cache() + + def restore_state_if_checkpoint_exists(self, model): + did_restore = False + + # do nothing if there's not dir or callback + no_ckpt_callback = (self.checkpoint_callback is None) or (not self.checkpoint_callback) + if no_ckpt_callback or not os.path.exists(self.checkpoint_callback.filepath): + return did_restore + + # restore trainer state and model if there is a weight for this experiment + last_steps = -1 + last_ckpt_name = None + + # find last epoch + checkpoints = os.listdir(self.checkpoint_callback.filepath) + for name in checkpoints: + if '.ckpt' in name and not name.endswith('part'): + if 'steps_' in name: + steps = name.split('steps_')[1] + steps = int(re.sub('[^0-9]', '', steps)) + + if steps > last_steps: + last_steps = steps + last_ckpt_name = name + + # restore last checkpoint + if last_ckpt_name is not None: + last_ckpt_path = os.path.join(self.checkpoint_callback.filepath, last_ckpt_name) + self.restore(last_ckpt_path, self.on_gpu) + logging.info(f'model and trainer restored from checkpoint: {last_ckpt_path}') + did_restore = True + + return did_restore + + def restore(self, checkpoint_path, on_gpu): + checkpoint = torch.load(checkpoint_path, map_location='cpu') + + # load model state + model = self.get_model() + + # load the state_dict on the model automatically + model.load_state_dict(checkpoint['state_dict'], strict=False) + if on_gpu: + model.cuda(self.root_gpu) + # load training state (affects trainer only) + self.restore_training_state(checkpoint) + model.global_step = self.global_step + del checkpoint + + try: + if dist.is_initialized() and dist.get_rank() > 0: + return + except Exception as e: + print(e) + return + + def restore_training_state(self, checkpoint): + """ + Restore trainer state. + Model will get its change to update + :param checkpoint: + :return: + """ + if self.checkpoint_callback is not None and self.checkpoint_callback is not False: + self.checkpoint_callback.best = checkpoint['checkpoint_callback_best'] + + self.global_step = checkpoint['global_step'] + self.current_epoch = checkpoint['epoch'] + + if self.testing: + return + + # restore the optimizers + optimizer_states = checkpoint['optimizer_states'] + for optimizer, opt_state in zip(self.optimizers, optimizer_states): + if optimizer is None: + return + optimizer.load_state_dict(opt_state) + + # move optimizer to GPU 1 weight at a time + # avoids OOM + if self.root_gpu is not None: + for state in optimizer.state.values(): + for k, v in state.items(): + if isinstance(v, torch.Tensor): + state[k] = v.cuda(self.root_gpu) + + # restore the lr schedulers + lr_schedulers = checkpoint['lr_schedulers'] + for scheduler, lrs_state in zip(self.lr_schedulers, lr_schedulers): + scheduler.load_state_dict(lrs_state) + + # -------------------- + # MODEL SAVE CHECKPOINT + # -------------------- + def _atomic_save(self, checkpoint, filepath): + """Saves a checkpoint atomically, avoiding the creation of incomplete checkpoints. 
+ + This will create a temporary checkpoint with a suffix of ``.part``, then copy it to the final location once + saving is finished. + + Args: + checkpoint (object): The object to save. + Built to be used with the ``dump_checkpoint`` method, but can deal with anything which ``torch.save`` + accepts. + filepath (str|pathlib.Path): The path to which the checkpoint will be saved. + This points to the file that the checkpoint will be stored in. + """ + tmp_path = str(filepath) + ".part" + torch.save(checkpoint, tmp_path) + os.replace(tmp_path, filepath) + + def save_checkpoint(self, filepath): + checkpoint = self.dump_checkpoint() + self._atomic_save(checkpoint, filepath) + + def dump_checkpoint(self): + + checkpoint = { + 'epoch': self.current_epoch, + 'global_step': self.global_step + } + + if self.checkpoint_callback is not None and self.checkpoint_callback is not False: + checkpoint['checkpoint_callback_best'] = self.checkpoint_callback.best + + # save optimizers + optimizer_states = [] + for i, optimizer in enumerate(self.optimizers): + if optimizer is not None: + optimizer_states.append(optimizer.state_dict()) + + checkpoint['optimizer_states'] = optimizer_states + + # save lr schedulers + lr_schedulers = [] + for i, scheduler in enumerate(self.lr_schedulers): + lr_schedulers.append(scheduler.state_dict()) + + checkpoint['lr_schedulers'] = lr_schedulers + + # add the hparams and state_dict from the model + model = self.get_model() + checkpoint['state_dict'] = model.state_dict() + # give the model a chance to add a few things + model.on_save_checkpoint(checkpoint) + + return checkpoint + + def copy_trainer_model_properties(self, model): + if isinstance(model, DP): + ref_model = model.module + elif isinstance(model, DDP): + ref_model = model.module + else: + ref_model = model + + for m in [model, ref_model]: + m.trainer = self + m.on_gpu = self.on_gpu + m.use_dp = self.use_dp + m.use_ddp = self.use_ddp + m.testing = self.testing + m.single_gpu = self.single_gpu + + def transfer_batch_to_gpu(self, batch, gpu_id): + # base case: object can be directly moved using `cuda` or `to` + if callable(getattr(batch, 'cuda', None)): + return batch.cuda(gpu_id, non_blocking=True) + + elif callable(getattr(batch, 'to', None)): + return batch.to(torch.device('cuda', gpu_id), non_blocking=True) + + # when list + elif isinstance(batch, list): + for i, x in enumerate(batch): + batch[i] = self.transfer_batch_to_gpu(x, gpu_id) + return batch + + # when tuple + elif isinstance(batch, tuple): + batch = list(batch) + for i, x in enumerate(batch): + batch[i] = self.transfer_batch_to_gpu(x, gpu_id) + return tuple(batch) + + # when dict + elif isinstance(batch, dict): + for k, v in batch.items(): + batch[k] = self.transfer_batch_to_gpu(v, gpu_id) + + return batch + + # nothing matches, return the value as is without transform + return batch + + def set_distributed_mode(self, distributed_backend): + # skip for CPU + if self.num_gpus == 0: + return + + # single GPU case + # in single gpu case we allow ddp so we can train on multiple + # nodes, 1 gpu per node + elif self.num_gpus == 1: + self.single_gpu = True + self.use_dp = False + self.use_ddp = False + self.root_gpu = 0 + self.data_parallel_device_ids = [0] + else: + if distributed_backend is not None: + self.use_dp = distributed_backend == 'dp' + self.use_ddp = distributed_backend == 'ddp' + elif distributed_backend is None: + self.use_dp = True + self.use_ddp = False + + logging.info(f'gpu available: {torch.cuda.is_available()}, used: {self.on_gpu}') + + def 
ddp_train(self, gpu_idx, model): + """ + Entry point into a DP thread + :param gpu_idx: + :param model: + :param cluster_obj: + :return: + """ + # otherwise default to node rank 0 + self.node_rank = 0 + + # show progressbar only on progress_rank 0 + self.show_progress_bar = self.show_progress_bar and self.node_rank == 0 and gpu_idx == 0 + + # determine which process we are and world size + if self.use_ddp: + self.proc_rank = self.node_rank * self.num_gpus + gpu_idx + self.world_size = self.num_gpus + + # let the exp know the rank to avoid overwriting logs + if self.logger is not None: + self.logger.rank = self.proc_rank + + # set up server using proc 0's ip address + # try to init for 20 times at max in case ports are taken + # where to store ip_table + model.trainer = self + model.init_ddp_connection(self.proc_rank, self.world_size) + + # CHOOSE OPTIMIZER + # allow for lr schedulers as well + model.model = model.build_model() + if not self.testing: + self.optimizers, self.lr_schedulers = self.init_optimizers(model.configure_optimizers()) + + # MODEL + # copy model to each gpu + if self.distributed_backend == 'ddp': + torch.cuda.set_device(gpu_idx) + model.cuda(gpu_idx) + + # set model properties before going into wrapper + self.copy_trainer_model_properties(model) + + # override root GPU + self.root_gpu = gpu_idx + + if self.distributed_backend == 'ddp': + device_ids = [gpu_idx] + else: + device_ids = None + + # allow user to configure ddp + model = model.configure_ddp(model, device_ids) + + # continue training routine + self.run_pretrain_routine(model) + + def resolve_root_node_address(self, root_node): + if '[' in root_node: + name = root_node.split('[')[0] + number = root_node.split(',')[0] + if '-' in number: + number = number.split('-')[0] + + number = re.sub('[^0-9]', '', number) + root_node = name + number + + return root_node + + def log_metrics(self, metrics, grad_norm_dic, step=None): + """Logs the metric dict passed in. + + :param metrics: + :param grad_norm_dic: + """ + # added metrics by Lightning for convenience + metrics['epoch'] = self.current_epoch + + # add norms + metrics.update(grad_norm_dic) + + # turn all tensors to scalars + scalar_metrics = self.metrics_to_scalars(metrics) + + step = step if step is not None else self.global_step + # log actual metrics + if self.proc_rank == 0 and self.logger is not None: + self.logger.log_metrics(scalar_metrics, step=step) + self.logger.save() + + def add_tqdm_metrics(self, metrics): + for k, v in metrics.items(): + if type(v) is torch.Tensor: + v = v.item() + + self.tqdm_metrics[k] = v + + def metrics_to_scalars(self, metrics): + new_metrics = {} + for k, v in metrics.items(): + if isinstance(v, torch.Tensor): + v = v.item() + + if type(v) is dict: + v = self.metrics_to_scalars(v) + + new_metrics[k] = v + + return new_metrics + + def process_output(self, output, train=False): + """Reduces output according to the training mode. 
+ + Separates loss from logging and tqdm metrics + :param output: + :return: + """ + # --------------- + # EXTRACT CALLBACK KEYS + # --------------- + # all keys not progress_bar or log are candidates for callbacks + callback_metrics = {} + for k, v in output.items(): + if k not in ['progress_bar', 'log', 'hiddens']: + callback_metrics[k] = v + + if train and self.use_dp: + num_gpus = self.num_gpus + callback_metrics = self.reduce_distributed_output(callback_metrics, num_gpus) + + for k, v in callback_metrics.items(): + if isinstance(v, torch.Tensor): + callback_metrics[k] = v.item() + + # --------------- + # EXTRACT PROGRESS BAR KEYS + # --------------- + try: + progress_output = output['progress_bar'] + + # reduce progress metrics for tqdm when using dp + if train and self.use_dp: + num_gpus = self.num_gpus + progress_output = self.reduce_distributed_output(progress_output, num_gpus) + + progress_bar_metrics = progress_output + except Exception: + progress_bar_metrics = {} + + # --------------- + # EXTRACT LOGGING KEYS + # --------------- + # extract metrics to log to experiment + try: + log_output = output['log'] + + # reduce progress metrics for tqdm when using dp + if train and self.use_dp: + num_gpus = self.num_gpus + log_output = self.reduce_distributed_output(log_output, num_gpus) + + log_metrics = log_output + except Exception: + log_metrics = {} + + # --------------- + # EXTRACT LOSS + # --------------- + # if output dict doesn't have the keyword loss + # then assume the output=loss if scalar + loss = None + if train: + try: + loss = output['loss'] + except Exception: + if type(output) is torch.Tensor: + loss = output + else: + raise RuntimeError( + 'No `loss` value in the dictionary returned from `model.training_step()`.' + ) + + # when using dp need to reduce the loss + if self.use_dp: + loss = self.reduce_distributed_output(loss, self.num_gpus) + + # --------------- + # EXTRACT HIDDEN + # --------------- + hiddens = output.get('hiddens') + + # use every metric passed in as a candidate for callback + callback_metrics.update(progress_bar_metrics) + callback_metrics.update(log_metrics) + + # convert tensors to numpy + for k, v in callback_metrics.items(): + if isinstance(v, torch.Tensor): + callback_metrics[k] = v.item() + + return loss, progress_bar_metrics, log_metrics, callback_metrics, hiddens + + def reduce_distributed_output(self, output, num_gpus): + if num_gpus <= 1: + return output + + # when using DP, we get one output per gpu + # average outputs and return + if type(output) is torch.Tensor: + return output.mean() + + for k, v in output.items(): + # recurse on nested dics + if isinstance(output[k], dict): + output[k] = self.reduce_distributed_output(output[k], num_gpus) + + # do nothing when there's a scalar + elif isinstance(output[k], torch.Tensor) and output[k].dim() == 0: + pass + + # reduce only metrics that have the same number of gpus + elif output[k].size(0) == num_gpus: + reduced = torch.mean(output[k]) + output[k] = reduced + return output + + def clip_gradients(self): + if self.gradient_clip_val > 0: + model = self.get_model() + torch.nn.utils.clip_grad_norm_(model.parameters(), self.gradient_clip_val) + + def print_nan_gradients(self): + model = self.get_model() + for param in model.parameters(): + if (param.grad is not None) and torch.isnan(param.grad.float()).any(): + logging.info(param, param.grad) + + def configure_accumulated_gradients(self, accumulate_grad_batches): + self.accumulate_grad_batches = None + + if isinstance(accumulate_grad_batches, dict): 
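+            # The dict form is an epoch-indexed schedule: e.g. {1: 1, 10: 4} keeps
+            # accumulation at 1 batch up to epoch 9 and switches to 4 batches from
+            # epoch 10 onwards (schedule keys are 1-indexed, see
+            # GradientAccumulationScheduler above).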
+ self.accumulation_scheduler = GradientAccumulationScheduler(accumulate_grad_batches) + elif isinstance(accumulate_grad_batches, int): + schedule = {1: accumulate_grad_batches} + self.accumulation_scheduler = GradientAccumulationScheduler(schedule) + else: + raise TypeError("Gradient accumulation supports only int and dict types") + + def get_dataloaders(self, model): + if not self.testing: + self.init_train_dataloader(model) + self.init_val_dataloader(model) + else: + self.init_test_dataloader(model) + + if self.use_ddp: + dist.barrier() + if not self.testing: + self.get_train_dataloader() + self.get_val_dataloaders() + else: + self.get_test_dataloaders() + + def init_train_dataloader(self, model): + self.fisrt_epoch = True + self.get_train_dataloader = model.train_dataloader + if isinstance(self.get_train_dataloader(), torch.utils.data.DataLoader): + self.num_training_batches = len(self.get_train_dataloader()) + self.num_training_batches = int(self.num_training_batches) + else: + self.num_training_batches = float('inf') + self.is_iterable_train_dataloader = True + if isinstance(self.val_check_interval, int): + self.val_check_batch = self.val_check_interval + else: + self._percent_range_check('val_check_interval') + self.val_check_batch = int(self.num_training_batches * self.val_check_interval) + self.val_check_batch = max(1, self.val_check_batch) + + def init_val_dataloader(self, model): + self.get_val_dataloaders = model.val_dataloader + self.num_val_batches = 0 + if self.get_val_dataloaders() is not None: + if isinstance(self.get_val_dataloaders()[0], torch.utils.data.DataLoader): + self.num_val_batches = sum(len(dataloader) for dataloader in self.get_val_dataloaders()) + self.num_val_batches = int(self.num_val_batches) + else: + self.num_val_batches = float('inf') + + def init_test_dataloader(self, model): + self.get_test_dataloaders = model.test_dataloader + if self.get_test_dataloaders() is not None: + if isinstance(self.get_test_dataloaders()[0], torch.utils.data.DataLoader): + self.num_test_batches = sum(len(dataloader) for dataloader in self.get_test_dataloaders()) + self.num_test_batches = int(self.num_test_batches) + else: + self.num_test_batches = float('inf') + + def evaluate(self, model, dataloaders, max_batches, test=False): + """Run evaluation code. 
+ + :param model: PT model + :param dataloaders: list of PT dataloaders + :param max_batches: Scalar + :param test: boolean + :return: + """ + # enable eval mode + model.zero_grad() + model.eval() + + # copy properties for forward overrides + self.copy_trainer_model_properties(model) + + # disable gradients to save memory + torch.set_grad_enabled(False) + + if test: + self.get_model().test_start() + # bookkeeping + outputs = [] + + # run training + for dataloader_idx, dataloader in enumerate(dataloaders): + dl_outputs = [] + for batch_idx, batch in enumerate(dataloader): + + if batch is None: # pragma: no cover + continue + + # stop short when on fast_dev_run (sets max_batch=1) + if batch_idx >= max_batches: + break + + # ----------------- + # RUN EVALUATION STEP + # ----------------- + output = self.evaluation_forward(model, + batch, + batch_idx, + dataloader_idx, + test) + + # track outputs for collation + dl_outputs.append(output) + + # batch done + if test: + self.test_progress_bar.update(1) + else: + self.val_progress_bar.update(1) + outputs.append(dl_outputs) + + # with a single dataloader don't pass an array + if len(dataloaders) == 1: + outputs = outputs[0] + + # give model a chance to do something with the outputs (and method defined) + model = self.get_model() + if test: + eval_results_ = model.test_end(outputs) + else: + eval_results_ = model.validation_end(outputs) + eval_results = eval_results_ + + # enable train mode again + model.train() + + # enable gradients to save memory + torch.set_grad_enabled(True) + + return eval_results + + def run_evaluation(self, test=False): + # when testing make sure user defined a test step + model = self.get_model() + model.on_pre_performance_check() + + # select dataloaders + if test: + dataloaders = self.get_test_dataloaders() + max_batches = self.num_test_batches + else: + # val + dataloaders = self.get_val_dataloaders() + max_batches = self.num_val_batches + + # init validation or test progress bar + # main progress bar will already be closed when testing so initial position is free + position = 2 * self.process_position + (not test) + desc = 'Testing' if test else 'Validating' + pbar = tqdm.tqdm(desc=desc, total=max_batches, leave=test, position=position, + disable=not self.show_progress_bar, dynamic_ncols=True, + unit='batch', file=sys.stdout) + setattr(self, f'{"test" if test else "val"}_progress_bar', pbar) + + # run evaluation + eval_results = self.evaluate(self.model, + dataloaders, + max_batches, + test) + if eval_results is not None: + _, prog_bar_metrics, log_metrics, callback_metrics, _ = self.process_output( + eval_results) + + # add metrics to prog bar + self.add_tqdm_metrics(prog_bar_metrics) + + # log metrics + self.log_metrics(log_metrics, {}) + + # track metrics for callbacks + self.callback_metrics.update(callback_metrics) + + # hook + model.on_post_performance_check() + + # add model specific metrics + tqdm_metrics = self.training_tqdm_dict + if not test: + self.main_progress_bar.set_postfix(**tqdm_metrics) + + # close progress bar + if test: + self.test_progress_bar.close() + else: + self.val_progress_bar.close() + + # model checkpointing + if self.proc_rank == 0 and self.checkpoint_callback is not None and not test: + self.checkpoint_callback.on_epoch_end(epoch=self.current_epoch, + logs=self.callback_metrics) + + def evaluation_forward(self, model, batch, batch_idx, dataloader_idx, test=False): + # make dataloader_idx arg in validation_step optional + args = [batch, batch_idx] + + if test and 
len(self.get_test_dataloaders()) > 1: + args.append(dataloader_idx) + + elif not test and len(self.get_val_dataloaders()) > 1: + args.append(dataloader_idx) + + # handle DP, DDP forward + if self.use_ddp or self.use_dp: + output = model(*args) + return output + + # single GPU + if self.single_gpu: + # for single GPU put inputs on gpu manually + root_gpu = 0 + if isinstance(self.data_parallel_device_ids, list): + root_gpu = self.data_parallel_device_ids[0] + batch = self.transfer_batch_to_gpu(batch, root_gpu) + args[0] = batch + + # CPU + if test: + output = model.test_step(*args) + else: + output = model.validation_step(*args) + + return output + + def train(self): + model = self.get_model() + # run all epochs + for epoch in range(self.current_epoch, 1000000): + # set seed for distributed sampler (enables shuffling for each epoch) + if self.use_ddp and hasattr(self.get_train_dataloader().sampler, 'set_epoch'): + self.get_train_dataloader().sampler.set_epoch(epoch) + + # get model + model = self.get_model() + + # update training progress in trainer and model + model.current_epoch = epoch + self.current_epoch = epoch + + total_val_batches = 0 + if not self.disable_validation: + # val can be checked multiple times in epoch + is_val_epoch = (self.current_epoch + 1) % self.check_val_every_n_epoch == 0 + val_checks_per_epoch = self.num_training_batches // self.val_check_batch + val_checks_per_epoch = val_checks_per_epoch if is_val_epoch else 0 + total_val_batches = self.num_val_batches * val_checks_per_epoch + + # total batches includes multiple val checks + self.total_batches = self.num_training_batches + total_val_batches + self.batch_loss_value = 0 # accumulated grads + + if self.is_iterable_train_dataloader: + # for iterable train loader, the progress bar never ends + num_iterations = None + else: + num_iterations = self.total_batches + + # reset progress bar + # .reset() doesn't work on disabled progress bar so we should check + desc = f'Epoch {epoch + 1}' if not self.is_iterable_train_dataloader else '' + self.main_progress_bar.set_description(desc) + + # changing gradient according accumulation_scheduler + self.accumulation_scheduler.on_epoch_begin(epoch, self) + + # ----------------- + # RUN TNG EPOCH + # ----------------- + self.run_training_epoch() + + # update LR schedulers + if self.lr_schedulers is not None: + for lr_scheduler in self.lr_schedulers: + lr_scheduler.step(epoch=self.current_epoch) + + self.main_progress_bar.close() + + model.on_train_end() + + if self.logger is not None: + self.logger.finalize("success") + + def run_training_epoch(self): + # before epoch hook + if self.is_function_implemented('on_epoch_start'): + model = self.get_model() + model.on_epoch_start() + + # run epoch + for batch_idx, batch in enumerate(self.get_train_dataloader()): + # stop epoch if we limited the number of training batches + if batch_idx >= self.num_training_batches: + break + + self.batch_idx = batch_idx + + model = self.get_model() + model.global_step = self.global_step + + # --------------- + # RUN TRAIN STEP + # --------------- + output = self.run_training_batch(batch, batch_idx) + batch_result, grad_norm_dic, batch_step_metrics = output + + # when returning -1 from train_step, we end epoch early + early_stop_epoch = batch_result == -1 + + # --------------- + # RUN VAL STEP + # --------------- + should_check_val = ( + not self.disable_validation and self.global_step % self.val_check_batch == 0 and not self.fisrt_epoch) + self.fisrt_epoch = False + + if should_check_val: + 
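+                # Validation fires whenever global_step is a multiple of val_check_batch
+                # (derived from val_check_interval in init_train_dataloader) and is
+                # skipped on the very first pass, guarded by the `fisrt_epoch` flag above.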
self.run_evaluation(test=self.testing) + + # when logs should be saved + should_save_log = (batch_idx + 1) % self.log_save_interval == 0 or early_stop_epoch + if should_save_log: + if self.proc_rank == 0 and self.logger is not None: + self.logger.save() + + # when metrics should be logged + should_log_metrics = batch_idx % self.row_log_interval == 0 or early_stop_epoch + if should_log_metrics: + # logs user requested information to logger + self.log_metrics(batch_step_metrics, grad_norm_dic) + + self.global_step += 1 + self.total_batch_idx += 1 + + # end epoch early + # stop when the flag is changed or we've gone past the amount + # requested in the batches + if early_stop_epoch: + break + if self.global_step > self.max_updates: + print("| Training end..") + exit() + + # epoch end hook + if self.is_function_implemented('on_epoch_end'): + model = self.get_model() + model.on_epoch_end() + + def run_training_batch(self, batch, batch_idx): + # track grad norms + grad_norm_dic = {} + + # track all metrics for callbacks + all_callback_metrics = [] + + # track metrics to log + all_log_metrics = [] + + if batch is None: + return 0, grad_norm_dic, {} + + # hook + if self.is_function_implemented('on_batch_start'): + model_ref = self.get_model() + response = model_ref.on_batch_start(batch) + + if response == -1: + return -1, grad_norm_dic, {} + + splits = [batch] + self.hiddens = None + for split_idx, split_batch in enumerate(splits): + self.split_idx = split_idx + + # call training_step once per optimizer + for opt_idx, optimizer in enumerate(self.optimizers): + if optimizer is None: + continue + # make sure only the gradients of the current optimizer's paramaters are calculated + # in the training step to prevent dangling gradients in multiple-optimizer setup. 
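+                # i.e. freeze every parameter first, then re-enable requires_grad only
+                # for the parameter groups owned by the optimizer currently being stepped.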
+ if len(self.optimizers) > 1: + for param in self.get_model().parameters(): + param.requires_grad = False + for group in optimizer.param_groups: + for param in group['params']: + param.requires_grad = True + + # wrap the forward step in a closure so second order methods work + def optimizer_closure(): + # forward pass + output = self.training_forward( + split_batch, batch_idx, opt_idx, self.hiddens) + + closure_loss = output[0] + progress_bar_metrics = output[1] + log_metrics = output[2] + callback_metrics = output[3] + self.hiddens = output[4] + if closure_loss is None: + return None + + # accumulate loss + # (if accumulate_grad_batches = 1 no effect) + closure_loss = closure_loss / self.accumulate_grad_batches + + # backward pass + model_ref = self.get_model() + if closure_loss.requires_grad: + model_ref.backward(closure_loss, optimizer) + + # track metrics for callbacks + all_callback_metrics.append(callback_metrics) + + # track progress bar metrics + self.add_tqdm_metrics(progress_bar_metrics) + all_log_metrics.append(log_metrics) + + # insert after step hook + if self.is_function_implemented('on_after_backward'): + model_ref = self.get_model() + model_ref.on_after_backward() + + return closure_loss + + # calculate loss + loss = optimizer_closure() + if loss is None: + continue + + # nan grads + if self.print_nan_grads: + self.print_nan_gradients() + + # track total loss for logging (avoid mem leaks) + self.batch_loss_value += loss.item() + + # gradient update with accumulated gradients + if (self.batch_idx + 1) % self.accumulate_grad_batches == 0: + + # track gradient norms when requested + if batch_idx % self.row_log_interval == 0: + if self.track_grad_norm > 0: + model = self.get_model() + grad_norm_dic = model.grad_norm( + self.track_grad_norm) + + # clip gradients + self.clip_gradients() + + # calls .step(), .zero_grad() + # override function to modify this behavior + model = self.get_model() + model.optimizer_step(self.current_epoch, batch_idx, optimizer, opt_idx) + + # calculate running loss for display + self.running_loss.append(self.batch_loss_value) + self.batch_loss_value = 0 + self.avg_loss = np.mean(self.running_loss[-100:]) + + # activate batch end hook + if self.is_function_implemented('on_batch_end'): + model = self.get_model() + model.on_batch_end() + + # update progress bar + self.main_progress_bar.update(1) + self.main_progress_bar.set_postfix(**self.training_tqdm_dict) + + # collapse all metrics into one dict + all_log_metrics = {k: v for d in all_log_metrics for k, v in d.items()} + + # track all metrics for callbacks + self.callback_metrics.update({k: v for d in all_callback_metrics for k, v in d.items()}) + + return 0, grad_norm_dic, all_log_metrics + + def training_forward(self, batch, batch_idx, opt_idx, hiddens): + """ + Handle forward for each training case (distributed, single gpu, etc...) 
+ :param batch: + :param batch_idx: + :return: + """ + # --------------- + # FORWARD + # --------------- + # enable not needing to add opt_idx to training_step + args = [batch, batch_idx, opt_idx] + + # distributed forward + if self.use_ddp or self.use_dp: + output = self.model(*args) + # single GPU forward + elif self.single_gpu: + gpu_id = 0 + if isinstance(self.data_parallel_device_ids, list): + gpu_id = self.data_parallel_device_ids[0] + batch = self.transfer_batch_to_gpu(copy.copy(batch), gpu_id) + args[0] = batch + output = self.model.training_step(*args) + # CPU forward + else: + output = self.model.training_step(*args) + + # allow any mode to define training_end + model_ref = self.get_model() + output_ = model_ref.training_end(output) + if output_ is not None: + output = output_ + + # format and reduce outputs accordingly + output = self.process_output(output, train=True) + + return output + + # --------------- + # Utils + # --------------- + def is_function_implemented(self, f_name): + model = self.get_model() + f_op = getattr(model, f_name, None) + return callable(f_op) + + def _percent_range_check(self, name): + value = getattr(self, name) + msg = f"`{name}` must lie in the range [0.0, 1.0], but got {value:.3f}." + if name == "val_check_interval": + msg += " If you want to disable validation set `val_percent_check` to 0.0 instead." + + if not 0. <= value <= 1.: + raise ValueError(msg) diff --git a/NeuralSeq/utils/plot.py b/NeuralSeq/utils/plot.py new file mode 100644 index 0000000000000000000000000000000000000000..bdca62a8cd80869c707890cd9febd39966cd3658 --- /dev/null +++ b/NeuralSeq/utils/plot.py @@ -0,0 +1,56 @@ +import matplotlib.pyplot as plt +import numpy as np +import torch + +LINE_COLORS = ['w', 'r', 'y', 'cyan', 'm', 'b', 'lime'] + + +def spec_to_figure(spec, vmin=None, vmax=None): + if isinstance(spec, torch.Tensor): + spec = spec.cpu().numpy() + fig = plt.figure(figsize=(12, 6)) + plt.pcolor(spec.T, vmin=vmin, vmax=vmax) + return fig + + +def spec_f0_to_figure(spec, f0s, figsize=None): + max_y = spec.shape[1] + if isinstance(spec, torch.Tensor): + spec = spec.detach().cpu().numpy() + f0s = {k: f0.detach().cpu().numpy() for k, f0 in f0s.items()} + f0s = {k: f0 / 10 for k, f0 in f0s.items()} + fig = plt.figure(figsize=(12, 6) if figsize is None else figsize) + plt.pcolor(spec.T) + for i, (k, f0) in enumerate(f0s.items()): + plt.plot(f0.clip(0, max_y), label=k, c=LINE_COLORS[i], linewidth=1, alpha=0.8) + plt.legend() + return fig + + +def dur_to_figure(dur_gt, dur_pred, txt): + dur_gt = dur_gt.long().cpu().numpy() + dur_pred = dur_pred.long().cpu().numpy() + dur_gt = np.cumsum(dur_gt) + dur_pred = np.cumsum(dur_pred) + fig = plt.figure(figsize=(12, 6)) + for i in range(len(dur_gt)): + shift = (i % 8) + 1 + plt.text(dur_gt[i], shift, txt[i]) + plt.text(dur_pred[i], 10 + shift, txt[i]) + plt.vlines(dur_gt[i], 0, 10, colors='b') # blue is gt + plt.vlines(dur_pred[i], 10, 20, colors='r') # red is pred + return fig + + +def f0_to_figure(f0_gt, f0_cwt=None, f0_pred=None): + fig = plt.figure() + f0_gt = f0_gt.cpu().numpy() + plt.plot(f0_gt, color='r', label='gt') + if f0_cwt is not None: + f0_cwt = f0_cwt.cpu().numpy() + plt.plot(f0_cwt, color='b', label='cwt') + if f0_pred is not None: + f0_pred = f0_pred.cpu().numpy() + plt.plot(f0_pred, color='green', label='pred') + plt.legend() + return fig diff --git a/NeuralSeq/utils/text_encoder.py b/NeuralSeq/utils/text_encoder.py new file mode 100644 index 
0000000000000000000000000000000000000000..d9e0758abc7b4e1f452481cba9715df08ceab543
--- /dev/null
+++ b/NeuralSeq/utils/text_encoder.py
@@ -0,0 +1,304 @@
+import re
+import six
+from six.moves import range  # pylint: disable=redefined-builtin
+
+PAD = "<pad>"
+EOS = "<EOS>"
+UNK = "<UNK>"
+SEG = "|"
+RESERVED_TOKENS = [PAD, EOS, UNK]
+NUM_RESERVED_TOKENS = len(RESERVED_TOKENS)
+PAD_ID = RESERVED_TOKENS.index(PAD)  # Normally 0
+EOS_ID = RESERVED_TOKENS.index(EOS)  # Normally 1
+UNK_ID = RESERVED_TOKENS.index(UNK)  # Normally 2
+
+if six.PY2:
+    RESERVED_TOKENS_BYTES = RESERVED_TOKENS
+else:
+    RESERVED_TOKENS_BYTES = [bytes(PAD, "ascii"), bytes(EOS, "ascii")]
+
+# Regular expression for unescaping token strings.
+# '\u' is converted to '_'
+# '\\' is converted to '\'
+# '\213;' is converted to unichr(213)
+_UNESCAPE_REGEX = re.compile(r"\\u|\\\\|\\([0-9]+);")
+_ESCAPE_CHARS = set(u"\\_u;0123456789")
+
+
+def strip_ids(ids, ids_to_strip):
+    """Strip ids_to_strip from the end ids."""
+    ids = list(ids)
+    while ids and ids[-1] in ids_to_strip:
+        ids.pop()
+    return ids
+
+
+class TextEncoder(object):
+    """Base class for converting from ints to/from human readable strings."""
+
+    def __init__(self, num_reserved_ids=NUM_RESERVED_TOKENS):
+        self._num_reserved_ids = num_reserved_ids
+
+    @property
+    def num_reserved_ids(self):
+        return self._num_reserved_ids
+
+    def encode(self, s):
+        """Transform a human-readable string into a sequence of int ids.
+
+        The ids should be in the range [num_reserved_ids, vocab_size). Ids [0,
+        num_reserved_ids) are reserved.
+
+        EOS is not appended.
+
+        Args:
+          s: human-readable string to be converted.
+
+        Returns:
+          ids: list of integers
+        """
+        return [int(w) + self._num_reserved_ids for w in s.split()]
+
+    def decode(self, ids, strip_extraneous=False):
+        """Transform a sequence of int ids into a human-readable string.
+
+        EOS is not expected in ids.
+
+        Args:
+          ids: list of integers to be converted.
+          strip_extraneous: bool, whether to strip off extraneous tokens
+            (EOS and PAD).
+
+        Returns:
+          s: human-readable string.
+        """
+        if strip_extraneous:
+            ids = strip_ids(ids, list(range(self._num_reserved_ids or 0)))
+        return " ".join(self.decode_list(ids))
+
+    def decode_list(self, ids):
+        """Transform a sequence of int ids into their string versions.
+
+        This method supports transforming individual input/output ids to their
+        string versions so that sequence to/from text conversions can be visualized
+        in a human readable format.
+
+        Args:
+          ids: list of integers to be converted.
+
+        Returns:
+          strs: list of human-readable string.
+        """
+        decoded_ids = []
+        for id_ in ids:
+            if 0 <= id_ < self._num_reserved_ids:
+                decoded_ids.append(RESERVED_TOKENS[int(id_)])
+            else:
+                decoded_ids.append(id_ - self._num_reserved_ids)
+        return [str(d) for d in decoded_ids]
+
+    @property
+    def vocab_size(self):
+        raise NotImplementedError()
+
+
+class ByteTextEncoder(TextEncoder):
+    """Encodes each byte to an id.
For 8-bit strings only.""" + + def encode(self, s): + numres = self._num_reserved_ids + if six.PY2: + if isinstance(s, unicode): + s = s.encode("utf-8") + return [ord(c) + numres for c in s] + # Python3: explicitly convert to UTF-8 + return [c + numres for c in s.encode("utf-8")] + + def decode(self, ids, strip_extraneous=False): + if strip_extraneous: + ids = strip_ids(ids, list(range(self._num_reserved_ids or 0))) + numres = self._num_reserved_ids + decoded_ids = [] + int2byte = six.int2byte + for id_ in ids: + if 0 <= id_ < numres: + decoded_ids.append(RESERVED_TOKENS_BYTES[int(id_)]) + else: + decoded_ids.append(int2byte(id_ - numres)) + if six.PY2: + return "".join(decoded_ids) + # Python3: join byte arrays and then decode string + return b"".join(decoded_ids).decode("utf-8", "replace") + + def decode_list(self, ids): + numres = self._num_reserved_ids + decoded_ids = [] + int2byte = six.int2byte + for id_ in ids: + if 0 <= id_ < numres: + decoded_ids.append(RESERVED_TOKENS_BYTES[int(id_)]) + else: + decoded_ids.append(int2byte(id_ - numres)) + # Python3: join byte arrays and then decode string + return decoded_ids + + @property + def vocab_size(self): + return 2**8 + self._num_reserved_ids + + +class ByteTextEncoderWithEos(ByteTextEncoder): + """Encodes each byte to an id and appends the EOS token.""" + + def encode(self, s): + return super(ByteTextEncoderWithEos, self).encode(s) + [EOS_ID] + + +class TokenTextEncoder(TextEncoder): + """Encoder based on a user-supplied vocabulary (file or list).""" + + def __init__(self, + vocab_filename, + reverse=False, + vocab_list=None, + replace_oov=None, + num_reserved_ids=NUM_RESERVED_TOKENS): + """Initialize from a file or list, one token per line. + + Handling of reserved tokens works as follows: + - When initializing from a list, we add reserved tokens to the vocab. + - When initializing from a file, we do not add reserved tokens to the vocab. + - When saving vocab files, we save reserved tokens to the file. + + Args: + vocab_filename: If not None, the full filename to read vocab from. If this + is not None, then vocab_list should be None. + reverse: Boolean indicating if tokens should be reversed during encoding + and decoding. + vocab_list: If not None, a list of elements of the vocabulary. If this is + not None, then vocab_filename should be None. + replace_oov: If not None, every out-of-vocabulary token seen when + encoding will be replaced by this string (which must be in vocab). + num_reserved_ids: Number of IDs to save for reserved tokens like . 
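+
+        Example (an illustrative sketch only, using a toy three-word vocab_list;
+        ids 0-2 are taken by the reserved PAD/EOS/UNK tokens):
+
+          encoder = TokenTextEncoder(None, vocab_list=['a', 'b', 'c'])
+          encoder.encode('a c b')    # -> [3, 5, 4]
+          encoder.decode([3, 5, 4])  # -> 'a c b'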
+ """ + super(TokenTextEncoder, self).__init__(num_reserved_ids=num_reserved_ids) + self._reverse = reverse + self._replace_oov = replace_oov + if vocab_filename: + self._init_vocab_from_file(vocab_filename) + else: + assert vocab_list is not None + self._init_vocab_from_list(vocab_list) + self.pad_index = self._token_to_id[PAD] + self.eos_index = self._token_to_id[EOS] + self.unk_index = self._token_to_id[UNK] + self.seg_index = self._token_to_id[SEG] if SEG in self._token_to_id else self.eos_index + + def encode(self, s): + """Converts a space-separated string of tokens to a list of ids.""" + sentence = s + tokens = sentence.strip().split() + if self._replace_oov is not None: + tokens = [t if t in self._token_to_id else self._replace_oov + for t in tokens] + ret = [self._token_to_id[tok] for tok in tokens] + return ret[::-1] if self._reverse else ret + + def decode(self, ids, strip_eos=False, strip_padding=False): + if strip_padding and self.pad() in list(ids): + pad_pos = list(ids).index(self.pad()) + ids = ids[:pad_pos] + if strip_eos and self.eos() in list(ids): + eos_pos = list(ids).index(self.eos()) + ids = ids[:eos_pos] + return " ".join(self.decode_list(ids)) + + def decode_list(self, ids): + seq = reversed(ids) if self._reverse else ids + return [self._safe_id_to_token(i) for i in seq] + + @property + def vocab_size(self): + return len(self._id_to_token) + + def __len__(self): + return self.vocab_size + + def _safe_id_to_token(self, idx): + return self._id_to_token.get(idx, "ID_%d" % idx) + + def _init_vocab_from_file(self, filename): + """Load vocab from a file. + + Args: + filename: The file to load vocabulary from. + """ + with open(filename) as f: + tokens = [token.strip() for token in f.readlines()] + + def token_gen(): + for token in tokens: + yield token + + self._init_vocab(token_gen(), add_reserved_tokens=False) + + def _init_vocab_from_list(self, vocab_list): + """Initialize tokens from a list of tokens. + + It is ok if reserved tokens appear in the vocab list. They will be + removed. The set of tokens in vocab_list should be unique. + + Args: + vocab_list: A list of tokens. + """ + def token_gen(): + for token in vocab_list: + if token not in RESERVED_TOKENS: + yield token + + self._init_vocab(token_gen()) + + def _init_vocab(self, token_generator, add_reserved_tokens=True): + """Initialize vocabulary with tokens from token_generator.""" + + self._id_to_token = {} + non_reserved_start_index = 0 + + if add_reserved_tokens: + self._id_to_token.update(enumerate(RESERVED_TOKENS)) + non_reserved_start_index = len(RESERVED_TOKENS) + + self._id_to_token.update( + enumerate(token_generator, start=non_reserved_start_index)) + + # _token_to_id is the reverse of _id_to_token + self._token_to_id = dict((v, k) + for k, v in six.iteritems(self._id_to_token)) + + def pad(self): + return self.pad_index + + def eos(self): + return self.eos_index + + def unk(self): + return self.unk_index + + def seg(self): + return self.seg_index + + def store_to_file(self, filename): + """Write vocab file to disk. + + Vocab files have one token per line. The file ends in a newline. Reserved + tokens are written to the vocab file as well. + + Args: + filename: Full path of the file to store the vocab to. 
+ """ + with open(filename, "w") as f: + for i in range(len(self._id_to_token)): + f.write(self._id_to_token[i] + "\n") + + def sil_phonemes(self): + return [p for p in self._id_to_token.values() if not p[0].isalpha()] diff --git a/NeuralSeq/utils/text_norm.py b/NeuralSeq/utils/text_norm.py new file mode 100644 index 0000000000000000000000000000000000000000..d0973cebc91e0525aeb6657e70012a1d37b5e6ff --- /dev/null +++ b/NeuralSeq/utils/text_norm.py @@ -0,0 +1,790 @@ +# coding=utf-8 +# Authors: +# 2019.5 Zhiyang Zhou (https://github.com/Joee1995/chn_text_norm.git) +# 2019.9 Jiayu DU +# +# requirements: +# - python 3.X +# notes: python 2.X WILL fail or produce misleading results + +import sys, os, argparse, codecs, string, re + +# ================================================================================ # +# basic constant +# ================================================================================ # +CHINESE_DIGIS = u'零一二三四五六七八九' +BIG_CHINESE_DIGIS_SIMPLIFIED = u'零壹贰叁肆伍陆柒捌玖' +BIG_CHINESE_DIGIS_TRADITIONAL = u'零壹貳參肆伍陸柒捌玖' +SMALLER_BIG_CHINESE_UNITS_SIMPLIFIED = u'十百千万' +SMALLER_BIG_CHINESE_UNITS_TRADITIONAL = u'拾佰仟萬' +LARGER_CHINESE_NUMERING_UNITS_SIMPLIFIED = u'亿兆京垓秭穰沟涧正载' +LARGER_CHINESE_NUMERING_UNITS_TRADITIONAL = u'億兆京垓秭穰溝澗正載' +SMALLER_CHINESE_NUMERING_UNITS_SIMPLIFIED = u'十百千万' +SMALLER_CHINESE_NUMERING_UNITS_TRADITIONAL = u'拾佰仟萬' + +ZERO_ALT = u'〇' +ONE_ALT = u'幺' +TWO_ALTS = [u'两', u'兩'] + +POSITIVE = [u'正', u'正'] +NEGATIVE = [u'负', u'負'] +POINT = [u'点', u'點'] +# PLUS = [u'加', u'加'] +# SIL = [u'杠', u'槓'] + +# 中文数字系统类型 +NUMBERING_TYPES = ['low', 'mid', 'high'] + +CURRENCY_NAMES = '(人民币|美元|日元|英镑|欧元|马克|法郎|加拿大元|澳元|港币|先令|芬兰马克|爱尔兰镑|' \ + '里拉|荷兰盾|埃斯库多|比塞塔|印尼盾|林吉特|新西兰元|比索|卢布|新加坡元|韩元|泰铢)' +CURRENCY_UNITS = '((亿|千万|百万|万|千|百)|(亿|千万|百万|万|千|百|)元|(亿|千万|百万|万|千|百|)块|角|毛|分)' +COM_QUANTIFIERS = '(匹|张|座|回|场|尾|条|个|首|阙|阵|网|炮|顶|丘|棵|只|支|袭|辆|挑|担|颗|壳|窠|曲|墙|群|腔|' \ + '砣|座|客|贯|扎|捆|刀|令|打|手|罗|坡|山|岭|江|溪|钟|队|单|双|对|出|口|头|脚|板|跳|枝|件|贴|' \ + '针|线|管|名|位|身|堂|课|本|页|家|户|层|丝|毫|厘|分|钱|两|斤|担|铢|石|钧|锱|忽|(千|毫|微)克|' \ + '毫|厘|分|寸|尺|丈|里|寻|常|铺|程|(千|分|厘|毫|微)米|撮|勺|合|升|斗|石|盘|碗|碟|叠|桶|笼|盆|' \ + '盒|杯|钟|斛|锅|簋|篮|盘|桶|罐|瓶|壶|卮|盏|箩|箱|煲|啖|袋|钵|年|月|日|季|刻|时|周|天|秒|分|旬|' \ + '纪|岁|世|更|夜|春|夏|秋|冬|代|伏|辈|丸|泡|粒|颗|幢|堆|条|根|支|道|面|片|张|颗|块)' + +# punctuation information are based on Zhon project (https://github.com/tsroten/zhon.git) +CHINESE_PUNC_STOP = '!?。。' +CHINESE_PUNC_NON_STOP = '"#$%&'()*+,-/:;<=>@[\]^_`{|}~⦅⦆「」、、〃《》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘’‛“”„‟…‧﹏' +CHINESE_PUNC_LIST = CHINESE_PUNC_STOP + CHINESE_PUNC_NON_STOP + + +# ================================================================================ # +# basic class +# ================================================================================ # +class ChineseChar(object): + """ + 中文字符 + 每个字符对应简体和繁体, + e.g. 简体 = '负', 繁体 = '負' + 转换时可转换为简体或繁体 + """ + + def __init__(self, simplified, traditional): + self.simplified = simplified + self.traditional = traditional + # self.__repr__ = self.__str__ + + def __str__(self): + return self.simplified or self.traditional or None + + def __repr__(self): + return self.__str__() + + +class ChineseNumberUnit(ChineseChar): + """ + 中文数字/数位字符 + 每个字符除繁简体外还有一个额外的大写字符 + e.g. 
'陆' 和 '陸' + """ + + def __init__(self, power, simplified, traditional, big_s, big_t): + super(ChineseNumberUnit, self).__init__(simplified, traditional) + self.power = power + self.big_s = big_s + self.big_t = big_t + + def __str__(self): + return '10^{}'.format(self.power) + + @classmethod + def create(cls, index, value, numbering_type=NUMBERING_TYPES[1], small_unit=False): + + if small_unit: + return ChineseNumberUnit(power=index + 1, + simplified=value[0], traditional=value[1], big_s=value[1], big_t=value[1]) + elif numbering_type == NUMBERING_TYPES[0]: + return ChineseNumberUnit(power=index + 8, + simplified=value[0], traditional=value[1], big_s=value[0], big_t=value[1]) + elif numbering_type == NUMBERING_TYPES[1]: + return ChineseNumberUnit(power=(index + 2) * 4, + simplified=value[0], traditional=value[1], big_s=value[0], big_t=value[1]) + elif numbering_type == NUMBERING_TYPES[2]: + return ChineseNumberUnit(power=pow(2, index + 3), + simplified=value[0], traditional=value[1], big_s=value[0], big_t=value[1]) + else: + raise ValueError( + 'Counting type should be in {0} ({1} provided).'.format(NUMBERING_TYPES, numbering_type)) + + +class ChineseNumberDigit(ChineseChar): + """ + 中文数字字符 + """ + + def __init__(self, value, simplified, traditional, big_s, big_t, alt_s=None, alt_t=None): + super(ChineseNumberDigit, self).__init__(simplified, traditional) + self.value = value + self.big_s = big_s + self.big_t = big_t + self.alt_s = alt_s + self.alt_t = alt_t + + def __str__(self): + return str(self.value) + + @classmethod + def create(cls, i, v): + return ChineseNumberDigit(i, v[0], v[1], v[2], v[3]) + + +class ChineseMath(ChineseChar): + """ + 中文数位字符 + """ + + def __init__(self, simplified, traditional, symbol, expression=None): + super(ChineseMath, self).__init__(simplified, traditional) + self.symbol = symbol + self.expression = expression + self.big_s = simplified + self.big_t = traditional + + +CC, CNU, CND, CM = ChineseChar, ChineseNumberUnit, ChineseNumberDigit, ChineseMath + + +class NumberSystem(object): + """ + 中文数字系统 + """ + pass + + +class MathSymbol(object): + """ + 用于中文数字系统的数学符号 (繁/简体), e.g. + positive = ['正', '正'] + negative = ['负', '負'] + point = ['点', '點'] + """ + + def __init__(self, positive, negative, point): + self.positive = positive + self.negative = negative + self.point = point + + def __iter__(self): + for v in self.__dict__.values(): + yield v + + +# class OtherSymbol(object): +# """ +# 其他符号 +# """ +# +# def __init__(self, sil): +# self.sil = sil +# +# def __iter__(self): +# for v in self.__dict__.values(): +# yield v + + +# ================================================================================ # +# basic utils +# ================================================================================ # +def create_system(numbering_type=NUMBERING_TYPES[1]): + """ + 根据数字系统类型返回创建相应的数字系统,默认为 mid + NUMBERING_TYPES = ['low', 'mid', 'high']: 中文数字系统类型 + low: '兆' = '亿' * '十' = $10^{9}$, '京' = '兆' * '十', etc. + mid: '兆' = '亿' * '万' = $10^{12}$, '京' = '兆' * '万', etc. + high: '兆' = '亿' * '亿' = $10^{16}$, '京' = '兆' * '兆', etc. 
+ 返回对应的数字系统 + """ + + # chinese number units of '亿' and larger + all_larger_units = zip( + LARGER_CHINESE_NUMERING_UNITS_SIMPLIFIED, LARGER_CHINESE_NUMERING_UNITS_TRADITIONAL) + larger_units = [CNU.create(i, v, numbering_type, False) + for i, v in enumerate(all_larger_units)] + # chinese number units of '十, 百, 千, 万' + all_smaller_units = zip( + SMALLER_CHINESE_NUMERING_UNITS_SIMPLIFIED, SMALLER_CHINESE_NUMERING_UNITS_TRADITIONAL) + smaller_units = [CNU.create(i, v, small_unit=True) + for i, v in enumerate(all_smaller_units)] + # digis + chinese_digis = zip(CHINESE_DIGIS, CHINESE_DIGIS, + BIG_CHINESE_DIGIS_SIMPLIFIED, BIG_CHINESE_DIGIS_TRADITIONAL) + digits = [CND.create(i, v) for i, v in enumerate(chinese_digis)] + digits[0].alt_s, digits[0].alt_t = ZERO_ALT, ZERO_ALT + digits[1].alt_s, digits[1].alt_t = ONE_ALT, ONE_ALT + digits[2].alt_s, digits[2].alt_t = TWO_ALTS[0], TWO_ALTS[1] + + # symbols + positive_cn = CM(POSITIVE[0], POSITIVE[1], '+', lambda x: x) + negative_cn = CM(NEGATIVE[0], NEGATIVE[1], '-', lambda x: -x) + point_cn = CM(POINT[0], POINT[1], '.', lambda x, + y: float(str(x) + '.' + str(y))) + # sil_cn = CM(SIL[0], SIL[1], '-', lambda x, y: float(str(x) + '-' + str(y))) + system = NumberSystem() + system.units = smaller_units + larger_units + system.digits = digits + system.math = MathSymbol(positive_cn, negative_cn, point_cn) + # system.symbols = OtherSymbol(sil_cn) + return system + + +def chn2num(chinese_string, numbering_type=NUMBERING_TYPES[1]): + def get_symbol(char, system): + for u in system.units: + if char in [u.traditional, u.simplified, u.big_s, u.big_t]: + return u + for d in system.digits: + if char in [d.traditional, d.simplified, d.big_s, d.big_t, d.alt_s, d.alt_t]: + return d + for m in system.math: + if char in [m.traditional, m.simplified]: + return m + + def string2symbols(chinese_string, system): + int_string, dec_string = chinese_string, '' + for p in [system.math.point.simplified, system.math.point.traditional]: + if p in chinese_string: + int_string, dec_string = chinese_string.split(p) + break + return [get_symbol(c, system) for c in int_string], \ + [get_symbol(c, system) for c in dec_string] + + def correct_symbols(integer_symbols, system): + """ + 一百八 to 一百八十 + 一亿一千三百万 to 一亿 一千万 三百万 + """ + + if integer_symbols and isinstance(integer_symbols[0], CNU): + if integer_symbols[0].power == 1: + integer_symbols = [system.digits[1]] + integer_symbols + + if len(integer_symbols) > 1: + if isinstance(integer_symbols[-1], CND) and isinstance(integer_symbols[-2], CNU): + integer_symbols.append( + CNU(integer_symbols[-2].power - 1, None, None, None, None)) + + result = [] + unit_count = 0 + for s in integer_symbols: + if isinstance(s, CND): + result.append(s) + unit_count = 0 + elif isinstance(s, CNU): + current_unit = CNU(s.power, None, None, None, None) + unit_count += 1 + + if unit_count == 1: + result.append(current_unit) + elif unit_count > 1: + for i in range(len(result)): + if isinstance(result[-i - 1], CNU) and result[-i - 1].power < current_unit.power: + result[-i - 1] = CNU(result[-i - 1].power + + current_unit.power, None, None, None, None) + return result + + def compute_value(integer_symbols): + """ + Compute the value. + When current unit is larger than previous unit, current unit * all previous units will be used as all previous units. + e.g. 
'两千万' = 2000 * 10000 not 2000 + 10000 + """ + value = [0] + last_power = 0 + for s in integer_symbols: + if isinstance(s, CND): + value[-1] = s.value + elif isinstance(s, CNU): + value[-1] *= pow(10, s.power) + if s.power > last_power: + value[:-1] = list(map(lambda v: v * + pow(10, s.power), value[:-1])) + last_power = s.power + value.append(0) + return sum(value) + + system = create_system(numbering_type) + int_part, dec_part = string2symbols(chinese_string, system) + int_part = correct_symbols(int_part, system) + int_str = str(compute_value(int_part)) + dec_str = ''.join([str(d.value) for d in dec_part]) + if dec_part: + return '{0}.{1}'.format(int_str, dec_str) + else: + return int_str + + +def num2chn(number_string, numbering_type=NUMBERING_TYPES[1], big=False, + traditional=False, alt_zero=False, alt_one=False, alt_two=True, + use_zeros=True, use_units=True): + def get_value(value_string, use_zeros=True): + + striped_string = value_string.lstrip('0') + + # record nothing if all zeros + if not striped_string: + return [] + + # record one digits + elif len(striped_string) == 1: + if use_zeros and len(value_string) != len(striped_string): + return [system.digits[0], system.digits[int(striped_string)]] + else: + return [system.digits[int(striped_string)]] + + # recursively record multiple digits + else: + result_unit = next(u for u in reversed( + system.units) if u.power < len(striped_string)) + result_string = value_string[:-result_unit.power] + return get_value(result_string) + [result_unit] + get_value(striped_string[-result_unit.power:]) + + system = create_system(numbering_type) + + int_dec = number_string.split('.') + if len(int_dec) == 1: + int_string = int_dec[0] + dec_string = "" + elif len(int_dec) == 2: + int_string = int_dec[0] + dec_string = int_dec[1] + else: + raise ValueError( + "invalid input num string with more than one dot: {}".format(number_string)) + + if use_units and len(int_string) > 1: + result_symbols = get_value(int_string) + else: + result_symbols = [system.digits[int(c)] for c in int_string] + dec_symbols = [system.digits[int(c)] for c in dec_string] + if dec_string: + result_symbols += [system.math.point] + dec_symbols + + if alt_two: + liang = CND(2, system.digits[2].alt_s, system.digits[2].alt_t, + system.digits[2].big_s, system.digits[2].big_t) + for i, v in enumerate(result_symbols): + if isinstance(v, CND) and v.value == 2: + next_symbol = result_symbols[i + + 1] if i < len(result_symbols) - 1 else None + previous_symbol = result_symbols[i - 1] if i > 0 else None + if isinstance(next_symbol, CNU) and isinstance(previous_symbol, (CNU, type(None))): + if next_symbol.power != 1 and ((previous_symbol is None) or (previous_symbol.power != 1)): + result_symbols[i] = liang + + # if big is True, '两' will not be used and `alt_two` has no impact on output + if big: + attr_name = 'big_' + if traditional: + attr_name += 't' + else: + attr_name += 's' + else: + if traditional: + attr_name = 'traditional' + else: + attr_name = 'simplified' + + result = ''.join([getattr(s, attr_name) for s in result_symbols]) + + # if not use_zeros: + # result = result.strip(getattr(system.digits[0], attr_name)) + + if alt_zero: + result = result.replace( + getattr(system.digits[0], attr_name), system.digits[0].alt_s) + + if alt_one: + result = result.replace( + getattr(system.digits[1], attr_name), system.digits[1].alt_s) + + for i, p in enumerate(POINT): + if result.startswith(p): + return CHINESE_DIGIS[0] + result + + # ^10, 11, .., 19 + if len(result) >= 2 and result[1] in 
[SMALLER_CHINESE_NUMERING_UNITS_SIMPLIFIED[0], + SMALLER_CHINESE_NUMERING_UNITS_TRADITIONAL[0]] and \ + result[0] in [CHINESE_DIGIS[1], BIG_CHINESE_DIGIS_SIMPLIFIED[1], BIG_CHINESE_DIGIS_TRADITIONAL[1]]: + result = result[1:] + + return result + + +# ================================================================================ # +# different types of rewriters +# ================================================================================ # +class Cardinal: + """ + CARDINAL类 + """ + + def __init__(self, cardinal=None, chntext=None): + self.cardinal = cardinal + self.chntext = chntext + + def chntext2cardinal(self): + return chn2num(self.chntext) + + def cardinal2chntext(self): + return num2chn(self.cardinal) + + +class Digit: + """ + DIGIT类 + """ + + def __init__(self, digit=None, chntext=None): + self.digit = digit + self.chntext = chntext + + # def chntext2digit(self): + # return chn2num(self.chntext) + + def digit2chntext(self): + return num2chn(self.digit, alt_two=False, use_units=False) + + +class TelePhone: + """ + TELEPHONE类 + """ + + def __init__(self, telephone=None, raw_chntext=None, chntext=None): + self.telephone = telephone + self.raw_chntext = raw_chntext + self.chntext = chntext + + # def chntext2telephone(self): + # sil_parts = self.raw_chntext.split('') + # self.telephone = '-'.join([ + # str(chn2num(p)) for p in sil_parts + # ]) + # return self.telephone + + def telephone2chntext(self, fixed=False): + + if fixed: + sil_parts = self.telephone.split('-') + self.raw_chntext = ''.join([ + num2chn(part, alt_two=False, use_units=False) for part in sil_parts + ]) + self.chntext = self.raw_chntext.replace('', '') + else: + sp_parts = self.telephone.strip('+').split() + self.raw_chntext = ''.join([ + num2chn(part, alt_two=False, use_units=False) for part in sp_parts + ]) + self.chntext = self.raw_chntext.replace('', '') + return self.chntext + + +class Fraction: + """ + FRACTION类 + """ + + def __init__(self, fraction=None, chntext=None): + self.fraction = fraction + self.chntext = chntext + + def chntext2fraction(self): + denominator, numerator = self.chntext.split('分之') + return chn2num(numerator) + '/' + chn2num(denominator) + + def fraction2chntext(self): + numerator, denominator = self.fraction.split('/') + return num2chn(denominator) + '分之' + num2chn(numerator) + + +class Date: + """ + DATE类 + """ + + def __init__(self, date=None, chntext=None): + self.date = date + self.chntext = chntext + + # def chntext2date(self): + # chntext = self.chntext + # try: + # year, other = chntext.strip().split('年', maxsplit=1) + # year = Digit(chntext=year).digit2chntext() + '年' + # except ValueError: + # other = chntext + # year = '' + # if other: + # try: + # month, day = other.strip().split('月', maxsplit=1) + # month = Cardinal(chntext=month).chntext2cardinal() + '月' + # except ValueError: + # day = chntext + # month = '' + # if day: + # day = Cardinal(chntext=day[:-1]).chntext2cardinal() + day[-1] + # else: + # month = '' + # day = '' + # date = year + month + day + # self.date = date + # return self.date + + def date2chntext(self): + date = self.date + try: + year, other = date.strip().split('年', 1) + year = Digit(digit=year).digit2chntext() + '年' + except ValueError: + other = date + year = '' + if other: + try: + month, day = other.strip().split('月', 1) + month = Cardinal(cardinal=month).cardinal2chntext() + '月' + except ValueError: + day = date + month = '' + if day: + day = Cardinal(cardinal=day[:-1]).cardinal2chntext() + day[-1] + else: + month = '' + day = '' + chntext = year 
+ month + day + self.chntext = chntext + return self.chntext + + +class Money: + """ + MONEY类 + """ + + def __init__(self, money=None, chntext=None): + self.money = money + self.chntext = chntext + + # def chntext2money(self): + # return self.money + + def money2chntext(self): + money = self.money + pattern = re.compile(r'(\d+(\.\d+)?)') + matchers = pattern.findall(money) + if matchers: + for matcher in matchers: + money = money.replace(matcher[0], Cardinal(cardinal=matcher[0]).cardinal2chntext()) + self.chntext = money + return self.chntext + + +class Percentage: + """ + PERCENTAGE类 + """ + + def __init__(self, percentage=None, chntext=None): + self.percentage = percentage + self.chntext = chntext + + def chntext2percentage(self): + return chn2num(self.chntext.strip().strip('百分之')) + '%' + + def percentage2chntext(self): + return '百分之' + num2chn(self.percentage.strip().strip('%')) + + +# ================================================================================ # +# NSW Normalizer +# ================================================================================ # +class NSWNormalizer: + def __init__(self, raw_text): + self.raw_text = '^' + raw_text + '$' + self.norm_text = '' + + def _particular(self): + text = self.norm_text + pattern = re.compile(r"(([a-zA-Z]+)二([a-zA-Z]+))") + matchers = pattern.findall(text) + if matchers: + # print('particular') + for matcher in matchers: + text = text.replace(matcher[0], matcher[1] + '2' + matcher[2], 1) + self.norm_text = text + return self.norm_text + + def normalize(self, remove_punc=True): + text = self.raw_text + + # 规范化日期 + pattern = re.compile(r"\D+((([089]\d|(19|20)\d{2})年)?(\d{1,2}月(\d{1,2}[日号])?)?)") + matchers = pattern.findall(text) + if matchers: + # print('date') + for matcher in matchers: + text = text.replace(matcher[0], Date(date=matcher[0]).date2chntext(), 1) + + # 规范化金钱 + pattern = re.compile(r"\D+((\d+(\.\d+)?)[多余几]?" + CURRENCY_UNITS + r"(\d" + CURRENCY_UNITS + r"?)?)") + matchers = pattern.findall(text) + if matchers: + # print('money') + for matcher in matchers: + text = text.replace(matcher[0], Money(money=matcher[0]).money2chntext(), 1) + + # 规范化固话/手机号码 + # 手机 + # http://www.jihaoba.com/news/show/13680 + # 移动:139、138、137、136、135、134、159、158、157、150、151、152、188、187、182、183、184、178、198 + # 联通:130、131、132、156、155、186、185、176 + # 电信:133、153、189、180、181、177 + pattern = re.compile(r"\D((\+?86 ?)?1([38]\d|5[0-35-9]|7[678]|9[89])\d{8})\D") + matchers = pattern.findall(text) + if matchers: + # print('telephone') + for matcher in matchers: + text = text.replace(matcher[0], TelePhone(telephone=matcher[0]).telephone2chntext(), 1) + # 固话 + pattern = re.compile(r"\D((0(10|2[1-3]|[3-9]\d{2})-?)?[1-9]\d{6,7})\D") + matchers = pattern.findall(text) + if matchers: + # print('fixed telephone') + for matcher in matchers: + text = text.replace(matcher[0], TelePhone(telephone=matcher[0]).telephone2chntext(fixed=True), 1) + + # 规范化分数 + pattern = re.compile(r"(\d+/\d+)") + matchers = pattern.findall(text) + if matchers: + # print('fraction') + for matcher in matchers: + text = text.replace(matcher, Fraction(fraction=matcher).fraction2chntext(), 1) + + # 规范化百分数 + text = text.replace('%', '%') + pattern = re.compile(r"(\d+(\.\d+)?%)") + matchers = pattern.findall(text) + if matchers: + # print('percentage') + for matcher in matchers: + text = text.replace(matcher[0], Percentage(percentage=matcher[0]).percentage2chntext(), 1) + + # 规范化纯数+量词 + pattern = re.compile(r"(\d+(\.\d+)?)[多余几]?" 
+ COM_QUANTIFIERS) + matchers = pattern.findall(text) + if matchers: + # print('cardinal+quantifier') + for matcher in matchers: + text = text.replace(matcher[0], Cardinal(cardinal=matcher[0]).cardinal2chntext(), 1) + + # 规范化数字编号 + pattern = re.compile(r"(\d{4,32})") + matchers = pattern.findall(text) + if matchers: + # print('digit') + for matcher in matchers: + text = text.replace(matcher, Digit(digit=matcher).digit2chntext(), 1) + + # 规范化纯数 + pattern = re.compile(r"(\d+(\.\d+)?)") + matchers = pattern.findall(text) + if matchers: + # print('cardinal') + for matcher in matchers: + text = text.replace(matcher[0], Cardinal(cardinal=matcher[0]).cardinal2chntext(), 1) + + self.norm_text = text + self._particular() + + text = self.norm_text.lstrip('^').rstrip('$') + if remove_punc: + # Punctuations removal + old_chars = CHINESE_PUNC_LIST + string.punctuation # includes all CN and EN punctuations + new_chars = ' ' * len(old_chars) + del_chars = '' + text = text.translate(str.maketrans(old_chars, new_chars, del_chars)) + return text + + +def nsw_test_case(raw_text): + print('I:' + raw_text) + print('O:' + NSWNormalizer(raw_text).normalize()) + print('') + + +def nsw_test(): + nsw_test_case('固话:0595-23865596或23880880。') + nsw_test_case('固话:0595-23865596或23880880。') + nsw_test_case('手机:+86 19859213959或15659451527。') + nsw_test_case('分数:32477/76391。') + nsw_test_case('百分数:80.03%。') + nsw_test_case('编号:31520181154418。') + nsw_test_case('纯数:2983.07克或12345.60米。') + nsw_test_case('日期:1999年2月20日或09年3月15号。') + nsw_test_case('金钱:12块5,34.5元,20.1万') + nsw_test_case('特殊:O2O或B2C。') + nsw_test_case('3456万吨') + nsw_test_case('2938个') + nsw_test_case('938') + nsw_test_case('今天吃了115个小笼包231个馒头') + nsw_test_case('有62%的概率') + + +if __name__ == '__main__': + # nsw_test() + + p = argparse.ArgumentParser() + p.add_argument('ifile', help='input filename, assume utf-8 encoding') + p.add_argument('ofile', help='output filename') + p.add_argument('--to_upper', action='store_true', help='convert to upper case') + p.add_argument('--to_lower', action='store_true', help='convert to lower case') + p.add_argument('--has_key', action='store_true', help="input text has Kaldi's key as first field.") + p.add_argument('--log_interval', type=int, default=10000, help='log interval in number of processed lines') + args = p.parse_args() + + ifile = codecs.open(args.ifile, 'r', 'utf8') + ofile = codecs.open(args.ofile, 'w+', 'utf8') + + n = 0 + for l in ifile: + key = '' + text = '' + if args.has_key: + cols = l.split(maxsplit=1) + key = cols[0] + if len(cols) == 2: + text = cols[1] + else: + text = '' + else: + text = l + + # cases + if args.to_upper and args.to_lower: + sys.stderr.write('text norm: to_upper OR to_lower?') + exit(1) + if args.to_upper: + text = text.upper() + if args.to_lower: + text = text.lower() + + # NSW(Non-Standard-Word) normalization + text = NSWNormalizer(text).normalize() + + # + if args.has_key: + ofile.write(key + '\t' + text) + else: + ofile.write(text) + + n += 1 + if n % args.log_interval == 0: + sys.stderr.write("text norm: {} lines done.\n".format(n)) + + sys.stderr.write("text norm: {} lines done in total.\n".format(n)) + + ifile.close() + ofile.close() diff --git a/NeuralSeq/utils/training_utils.py b/NeuralSeq/utils/training_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..409b15388790b1aadb24632313bdd1f41b4b06ac --- /dev/null +++ b/NeuralSeq/utils/training_utils.py @@ -0,0 +1,27 @@ +from utils.hparams import hparams + + +class RSQRTSchedule(object): + def __init__(self, 
optimizer): + super().__init__() + self.optimizer = optimizer + self.constant_lr = hparams['lr'] + self.warmup_updates = hparams['warmup_updates'] + self.hidden_size = hparams['hidden_size'] + self.lr = hparams['lr'] + for param_group in optimizer.param_groups: + param_group['lr'] = self.lr + self.step(0) + + def step(self, num_updates): + constant_lr = self.constant_lr + warmup = min(num_updates / self.warmup_updates, 1.0) + rsqrt_decay = max(self.warmup_updates, num_updates) ** -0.5 + rsqrt_hidden = self.hidden_size ** -0.5 + self.lr = max(constant_lr * warmup * rsqrt_decay * rsqrt_hidden, 1e-7) + for param_group in self.optimizer.param_groups: + param_group['lr'] = self.lr + return self.lr + + def get_lr(self): + return self.optimizer.param_groups[0]['lr'] diff --git a/NeuralSeq/utils/tts_utils.py b/NeuralSeq/utils/tts_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..869b8b4bc495f119a4f09fa8436b6b6aec02a81d --- /dev/null +++ b/NeuralSeq/utils/tts_utils.py @@ -0,0 +1,398 @@ +from collections import defaultdict +import torch +import torch.nn.functional as F + + +def make_positions(tensor, padding_idx): + """Replace non-padding symbols with their position numbers. + + Position numbers begin at padding_idx+1. Padding symbols are ignored. + """ + # The series of casts and type-conversions here are carefully + # balanced to both work with ONNX export and XLA. In particular XLA + # prefers ints, cumsum defaults to output longs, and ONNX doesn't know + # how to handle the dtype kwarg in cumsum. + mask = tensor.ne(padding_idx).int() + return ( + torch.cumsum(mask, dim=1).type_as(mask) * mask + ).long() + padding_idx + + +def softmax(x, dim): + return F.softmax(x, dim=dim, dtype=torch.float32) + + +def sequence_mask(lengths, maxlen, dtype=torch.bool): + if maxlen is None: + maxlen = lengths.max() + mask = ~(torch.ones((len(lengths), maxlen)).to(lengths.device).cumsum(dim=1).t() > lengths).t() + mask.type(dtype) + return mask + + +INCREMENTAL_STATE_INSTANCE_ID = defaultdict(lambda: 0) + + +def _get_full_incremental_state_key(module_instance, key): + module_name = module_instance.__class__.__name__ + + # assign a unique ID to each module instance, so that incremental state is + # not shared across module instances + if not hasattr(module_instance, '_instance_id'): + INCREMENTAL_STATE_INSTANCE_ID[module_name] += 1 + module_instance._instance_id = INCREMENTAL_STATE_INSTANCE_ID[module_name] + + return '{}.{}.{}'.format(module_name, module_instance._instance_id, key) + + +def get_incremental_state(module, incremental_state, key): + """Helper for getting incremental state for an nn.Module.""" + full_key = _get_full_incremental_state_key(module, key) + if incremental_state is None or full_key not in incremental_state: + return None + return incremental_state[full_key] + + +def set_incremental_state(module, incremental_state, key, value): + """Helper for setting incremental state for an nn.Module.""" + if incremental_state is not None: + full_key = _get_full_incremental_state_key(module, key) + incremental_state[full_key] = value + + +def fill_with_neg_inf(t): + """FP16-compatible function that fills a tensor with -inf.""" + return t.float().fill_(float('-inf')).type_as(t) + + +def fill_with_neg_inf2(t): + """FP16-compatible function that fills a tensor with -inf.""" + return t.float().fill_(-1e8).type_as(t) + + +def get_focus_rate(attn, src_padding_mask=None, tgt_padding_mask=None): + ''' + attn: bs x L_t x L_s + ''' + if src_padding_mask is not None: + attn = attn * (1 - 
src_padding_mask.float())[:, None, :] + + if tgt_padding_mask is not None: + attn = attn * (1 - tgt_padding_mask.float())[:, :, None] + + focus_rate = attn.max(-1).values.sum(-1) + focus_rate = focus_rate / attn.sum(-1).sum(-1) + return focus_rate + + +def get_phone_coverage_rate(attn, src_padding_mask=None, src_seg_mask=None, tgt_padding_mask=None): + ''' + attn: bs x L_t x L_s + ''' + src_mask = attn.new(attn.size(0), attn.size(-1)).bool().fill_(False) + if src_padding_mask is not None: + src_mask |= src_padding_mask + if src_seg_mask is not None: + src_mask |= src_seg_mask + + attn = attn * (1 - src_mask.float())[:, None, :] + if tgt_padding_mask is not None: + attn = attn * (1 - tgt_padding_mask.float())[:, :, None] + + phone_coverage_rate = attn.max(1).values.sum(-1) + # phone_coverage_rate = phone_coverage_rate / attn.sum(-1).sum(-1) + phone_coverage_rate = phone_coverage_rate / (1 - src_mask.float()).sum(-1) + return phone_coverage_rate + + +def get_diagonal_focus_rate(attn, attn_ks, target_len, src_padding_mask=None, tgt_padding_mask=None, + band_mask_factor=5, band_width=50): + ''' + attn: bx x L_t x L_s + attn_ks: shape: tensor with shape [batch_size], input_lens/output_lens + + diagonal: y=k*x (k=attn_ks, x:output, y:input) + 1 0 0 + 0 1 0 + 0 0 1 + y>=k*(x-width) and y<=k*(x+width):1 + else:0 + ''' + # width = min(target_len/band_mask_factor, 50) + width1 = target_len / band_mask_factor + width2 = target_len.new(target_len.size()).fill_(band_width) + width = torch.where(width1 < width2, width1, width2).float() + base = torch.ones(attn.size()).to(attn.device) + zero = torch.zeros(attn.size()).to(attn.device) + x = torch.arange(0, attn.size(1)).to(attn.device)[None, :, None].float() * base + y = torch.arange(0, attn.size(2)).to(attn.device)[None, None, :].float() * base + cond = (y - attn_ks[:, None, None] * x) + cond1 = cond + attn_ks[:, None, None] * width[:, None, None] + cond2 = cond - attn_ks[:, None, None] * width[:, None, None] + mask1 = torch.where(cond1 < 0, zero, base) + mask2 = torch.where(cond2 > 0, zero, base) + mask = mask1 * mask2 + + if src_padding_mask is not None: + attn = attn * (1 - src_padding_mask.float())[:, None, :] + if tgt_padding_mask is not None: + attn = attn * (1 - tgt_padding_mask.float())[:, :, None] + + diagonal_attn = attn * mask + diagonal_focus_rate = diagonal_attn.sum(-1).sum(-1) / attn.sum(-1).sum(-1) + return diagonal_focus_rate, mask + + +def select_attn(attn_logits, type='best'): + """ + + :param attn_logits: [n_layers, B, n_head, T_sp, T_txt] + :return: + """ + encdec_attn = torch.stack(attn_logits, 0).transpose(1, 2) + # [n_layers * n_head, B, T_sp, T_txt] + encdec_attn = (encdec_attn.reshape([-1, *encdec_attn.shape[2:]])).softmax(-1) + if type == 'best': + indices = encdec_attn.max(-1).values.sum(-1).argmax(0) + encdec_attn = encdec_attn.gather( + 0, indices[None, :, None, None].repeat(1, 1, encdec_attn.size(-2), encdec_attn.size(-1)))[0] + return encdec_attn + elif type == 'mean': + return encdec_attn.mean(0) + + +def make_pad_mask(lengths, xs=None, length_dim=-1): + """Make mask tensor containing indices of padded part. + Args: + lengths (LongTensor or List): Batch of lengths (B,). + xs (Tensor, optional): The reference tensor. + If set, masks will be the same shape as this tensor. + length_dim (int, optional): Dimension indicator of the above tensor. + See the example. + Returns: + Tensor: Mask tensor containing indices of padded part. 
+ dtype=torch.uint8 in PyTorch 1.2- + dtype=torch.bool in PyTorch 1.2+ (including 1.2) + Examples: + With only lengths. + >>> lengths = [5, 3, 2] + >>> make_non_pad_mask(lengths) + masks = [[0, 0, 0, 0 ,0], + [0, 0, 0, 1, 1], + [0, 0, 1, 1, 1]] + With the reference tensor. + >>> xs = torch.zeros((3, 2, 4)) + >>> make_pad_mask(lengths, xs) + tensor([[[0, 0, 0, 0], + [0, 0, 0, 0]], + [[0, 0, 0, 1], + [0, 0, 0, 1]], + [[0, 0, 1, 1], + [0, 0, 1, 1]]], dtype=torch.uint8) + >>> xs = torch.zeros((3, 2, 6)) + >>> make_pad_mask(lengths, xs) + tensor([[[0, 0, 0, 0, 0, 1], + [0, 0, 0, 0, 0, 1]], + [[0, 0, 0, 1, 1, 1], + [0, 0, 0, 1, 1, 1]], + [[0, 0, 1, 1, 1, 1], + [0, 0, 1, 1, 1, 1]]], dtype=torch.uint8) + With the reference tensor and dimension indicator. + >>> xs = torch.zeros((3, 6, 6)) + >>> make_pad_mask(lengths, xs, 1) + tensor([[[0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [1, 1, 1, 1, 1, 1]], + [[0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1]], + [[0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1]]], dtype=torch.uint8) + >>> make_pad_mask(lengths, xs, 2) + tensor([[[0, 0, 0, 0, 0, 1], + [0, 0, 0, 0, 0, 1], + [0, 0, 0, 0, 0, 1], + [0, 0, 0, 0, 0, 1], + [0, 0, 0, 0, 0, 1], + [0, 0, 0, 0, 0, 1]], + [[0, 0, 0, 1, 1, 1], + [0, 0, 0, 1, 1, 1], + [0, 0, 0, 1, 1, 1], + [0, 0, 0, 1, 1, 1], + [0, 0, 0, 1, 1, 1], + [0, 0, 0, 1, 1, 1]], + [[0, 0, 1, 1, 1, 1], + [0, 0, 1, 1, 1, 1], + [0, 0, 1, 1, 1, 1], + [0, 0, 1, 1, 1, 1], + [0, 0, 1, 1, 1, 1], + [0, 0, 1, 1, 1, 1]]], dtype=torch.uint8) + """ + if length_dim == 0: + raise ValueError("length_dim cannot be 0: {}".format(length_dim)) + + if not isinstance(lengths, list): + lengths = lengths.tolist() + bs = int(len(lengths)) + if xs is None: + maxlen = int(max(lengths)) + else: + maxlen = xs.size(length_dim) + + seq_range = torch.arange(0, maxlen, dtype=torch.int64) + seq_range_expand = seq_range.unsqueeze(0).expand(bs, maxlen) + seq_length_expand = seq_range_expand.new(lengths).unsqueeze(-1) + mask = seq_range_expand >= seq_length_expand + + if xs is not None: + assert xs.size(0) == bs, (xs.size(0), bs) + + if length_dim < 0: + length_dim = xs.dim() + length_dim + # ind = (:, None, ..., None, :, , None, ..., None) + ind = tuple( + slice(None) if i in (0, length_dim) else None for i in range(xs.dim()) + ) + mask = mask[ind].expand_as(xs).to(xs.device) + return mask + + +def make_non_pad_mask(lengths, xs=None, length_dim=-1): + """Make mask tensor containing indices of non-padded part. + Args: + lengths (LongTensor or List): Batch of lengths (B,). + xs (Tensor, optional): The reference tensor. + If set, masks will be the same shape as this tensor. + length_dim (int, optional): Dimension indicator of the above tensor. + See the example. + Returns: + ByteTensor: mask tensor containing indices of padded part. + dtype=torch.uint8 in PyTorch 1.2- + dtype=torch.bool in PyTorch 1.2+ (including 1.2) + Examples: + With only lengths. + >>> lengths = [5, 3, 2] + >>> make_non_pad_mask(lengths) + masks = [[1, 1, 1, 1 ,1], + [1, 1, 1, 0, 0], + [1, 1, 0, 0, 0]] + With the reference tensor. 
+ >>> xs = torch.zeros((3, 2, 4)) + >>> make_non_pad_mask(lengths, xs) + tensor([[[1, 1, 1, 1], + [1, 1, 1, 1]], + [[1, 1, 1, 0], + [1, 1, 1, 0]], + [[1, 1, 0, 0], + [1, 1, 0, 0]]], dtype=torch.uint8) + >>> xs = torch.zeros((3, 2, 6)) + >>> make_non_pad_mask(lengths, xs) + tensor([[[1, 1, 1, 1, 1, 0], + [1, 1, 1, 1, 1, 0]], + [[1, 1, 1, 0, 0, 0], + [1, 1, 1, 0, 0, 0]], + [[1, 1, 0, 0, 0, 0], + [1, 1, 0, 0, 0, 0]]], dtype=torch.uint8) + With the reference tensor and dimension indicator. + >>> xs = torch.zeros((3, 6, 6)) + >>> make_non_pad_mask(lengths, xs, 1) + tensor([[[1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1], + [0, 0, 0, 0, 0, 0]], + [[1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0]], + [[1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0]]], dtype=torch.uint8) + >>> make_non_pad_mask(lengths, xs, 2) + tensor([[[1, 1, 1, 1, 1, 0], + [1, 1, 1, 1, 1, 0], + [1, 1, 1, 1, 1, 0], + [1, 1, 1, 1, 1, 0], + [1, 1, 1, 1, 1, 0], + [1, 1, 1, 1, 1, 0]], + [[1, 1, 1, 0, 0, 0], + [1, 1, 1, 0, 0, 0], + [1, 1, 1, 0, 0, 0], + [1, 1, 1, 0, 0, 0], + [1, 1, 1, 0, 0, 0], + [1, 1, 1, 0, 0, 0]], + [[1, 1, 0, 0, 0, 0], + [1, 1, 0, 0, 0, 0], + [1, 1, 0, 0, 0, 0], + [1, 1, 0, 0, 0, 0], + [1, 1, 0, 0, 0, 0], + [1, 1, 0, 0, 0, 0]]], dtype=torch.uint8) + """ + return ~make_pad_mask(lengths, xs, length_dim) + + +def get_mask_from_lengths(lengths): + max_len = torch.max(lengths).item() + ids = torch.arange(0, max_len).to(lengths.device) + mask = (ids < lengths.unsqueeze(1)).bool() + return mask + + +def group_hidden_by_segs(h, seg_ids, max_len): + """ + + :param h: [B, T, H] + :param seg_ids: [B, T] + :return: h_ph: [B, T_ph, H] + """ + B, T, H = h.shape + h_gby_segs = h.new_zeros([B, max_len + 1, H]).scatter_add_(1, seg_ids[:, :, None].repeat([1, 1, H]), h) + all_ones = h.new_ones(h.shape[:2]) + cnt_gby_segs = h.new_zeros([B, max_len + 1]).scatter_add_(1, seg_ids, all_ones).contiguous() + h_gby_segs = h_gby_segs[:, 1:] + cnt_gby_segs = cnt_gby_segs[:, 1:] + h_gby_segs = h_gby_segs / torch.clamp(cnt_gby_segs[:, :, None], min=1) + return h_gby_segs, cnt_gby_segs + +def mel2token_to_dur(mel2token, T_txt=None, max_dur=None): + is_torch = isinstance(mel2token, torch.Tensor) + has_batch_dim = True + if not is_torch: + mel2token = torch.LongTensor(mel2token) + if T_txt is None: + T_txt = mel2token.max() + if len(mel2token.shape) == 1: + mel2token = mel2token[None, ...] 
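+        # 1-D input: add a temporary batch axis; it is removed again before returning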
+ has_batch_dim = False + B, _ = mel2token.shape + dur = mel2token.new_zeros(B, T_txt + 1).scatter_add(1, mel2token, torch.ones_like(mel2token)) + dur = dur[:, 1:] + if max_dur is not None: + dur = dur.clamp(max=max_dur) + if not is_torch: + dur = dur.numpy() + if not has_batch_dim: + dur = dur[0] + return dur + +def expand_word2ph(word_encoding, ph2word): + word_encoding = F.pad(word_encoding,[0,0,1,0]) + ph2word_ = ph2word[:, :, None].repeat([1, 1, word_encoding.shape[-1]]) + out = torch.gather(word_encoding, 1, ph2word_) # [B, T, H] + return out \ No newline at end of file diff --git a/NeuralSeq/vocoders/__init__.py b/NeuralSeq/vocoders/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..66c318857ce48048437dede7072901ad6471b8fc --- /dev/null +++ b/NeuralSeq/vocoders/__init__.py @@ -0,0 +1 @@ +from vocoders import hifigan diff --git a/NeuralSeq/vocoders/__pycache__/__init__.cpython-37.pyc b/NeuralSeq/vocoders/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8b34d2a2f64b654b1e587e559d17e9f1840d426f Binary files /dev/null and b/NeuralSeq/vocoders/__pycache__/__init__.cpython-37.pyc differ diff --git a/NeuralSeq/vocoders/__pycache__/__init__.cpython-38.pyc b/NeuralSeq/vocoders/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ba12c81b89681c8ba2df8a8dc99244a763cfd916 Binary files /dev/null and b/NeuralSeq/vocoders/__pycache__/__init__.cpython-38.pyc differ diff --git a/NeuralSeq/vocoders/__pycache__/base_vocoder.cpython-37.pyc b/NeuralSeq/vocoders/__pycache__/base_vocoder.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2f68aa841627a09df3a606581cdf2a2d076119f8 Binary files /dev/null and b/NeuralSeq/vocoders/__pycache__/base_vocoder.cpython-37.pyc differ diff --git a/NeuralSeq/vocoders/__pycache__/base_vocoder.cpython-38.pyc b/NeuralSeq/vocoders/__pycache__/base_vocoder.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cc7ff08db6c61c71a1c3d7a9a37a17109647e585 Binary files /dev/null and b/NeuralSeq/vocoders/__pycache__/base_vocoder.cpython-38.pyc differ diff --git a/NeuralSeq/vocoders/__pycache__/hifigan.cpython-37.pyc b/NeuralSeq/vocoders/__pycache__/hifigan.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ceae99b1e5938a9173d5234f87be4d6a79580654 Binary files /dev/null and b/NeuralSeq/vocoders/__pycache__/hifigan.cpython-37.pyc differ diff --git a/NeuralSeq/vocoders/__pycache__/hifigan.cpython-38.pyc b/NeuralSeq/vocoders/__pycache__/hifigan.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..56f086a360b1a9053cfab75cb22330388ba8b00e Binary files /dev/null and b/NeuralSeq/vocoders/__pycache__/hifigan.cpython-38.pyc differ diff --git a/NeuralSeq/vocoders/__pycache__/pwg.cpython-37.pyc b/NeuralSeq/vocoders/__pycache__/pwg.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dfeea94ef12d84c0b63dac0390854394fa6e1375 Binary files /dev/null and b/NeuralSeq/vocoders/__pycache__/pwg.cpython-37.pyc differ diff --git a/NeuralSeq/vocoders/__pycache__/pwg.cpython-38.pyc b/NeuralSeq/vocoders/__pycache__/pwg.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0e53a4580833ab0fc250b56e7e9182db9bd4390d Binary files /dev/null and b/NeuralSeq/vocoders/__pycache__/pwg.cpython-38.pyc differ diff --git a/NeuralSeq/vocoders/__pycache__/vocoder_utils.cpython-37.pyc 
b/NeuralSeq/vocoders/__pycache__/vocoder_utils.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cc3da72d7f0ffcb038c393c9c9a86e5e493e53f0 Binary files /dev/null and b/NeuralSeq/vocoders/__pycache__/vocoder_utils.cpython-37.pyc differ diff --git a/NeuralSeq/vocoders/__pycache__/vocoder_utils.cpython-38.pyc b/NeuralSeq/vocoders/__pycache__/vocoder_utils.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1cd36ebb7a39cbdf6fed59c74cbdd37aea64c2cc Binary files /dev/null and b/NeuralSeq/vocoders/__pycache__/vocoder_utils.cpython-38.pyc differ diff --git a/NeuralSeq/vocoders/base_vocoder.py b/NeuralSeq/vocoders/base_vocoder.py new file mode 100644 index 0000000000000000000000000000000000000000..fe49a9e4f790ecdc5e76d60a23f96602b59fc48d --- /dev/null +++ b/NeuralSeq/vocoders/base_vocoder.py @@ -0,0 +1,39 @@ +import importlib +VOCODERS = {} + + +def register_vocoder(cls): + VOCODERS[cls.__name__.lower()] = cls + VOCODERS[cls.__name__] = cls + return cls + + +def get_vocoder_cls(hparams): + if hparams['vocoder'] in VOCODERS: + return VOCODERS[hparams['vocoder']] + else: + vocoder_cls = hparams['vocoder'] + pkg = ".".join(vocoder_cls.split(".")[:-1]) + cls_name = vocoder_cls.split(".")[-1] + vocoder_cls = getattr(importlib.import_module(pkg), cls_name) + return vocoder_cls + + +class BaseVocoder: + def spec2wav(self, mel): + """ + + :param mel: [T, 80] + :return: wav: [T'] + """ + + raise NotImplementedError + + @staticmethod + def wav2spec(wav_fn): + """ + + :param wav_fn: str + :return: wav, mel: [T, 80] + """ + raise NotImplementedError diff --git a/NeuralSeq/vocoders/hifigan.py b/NeuralSeq/vocoders/hifigan.py new file mode 100644 index 0000000000000000000000000000000000000000..810d3c931b556387f8a2e85537f4964add1e76b0 --- /dev/null +++ b/NeuralSeq/vocoders/hifigan.py @@ -0,0 +1,76 @@ +import glob +import json +import os +import re + +import librosa +import torch + +import utils +from modules.hifigan.hifigan import HifiGanGenerator +from utils.hparams import hparams, set_hparams +from vocoders.base_vocoder import register_vocoder +from vocoders.pwg import PWG +from vocoders.vocoder_utils import denoise + + +def load_model(config_path, checkpoint_path): + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + ckpt_dict = torch.load(checkpoint_path, map_location="cpu") + if '.yaml' in config_path: + config = set_hparams(config_path, global_hparams=False) + state = ckpt_dict["state_dict"]["model_gen"] + elif '.json' in config_path: + config = json.load(open(config_path, 'r')) + state = ckpt_dict["generator"] + + model = HifiGanGenerator(config) + model.load_state_dict(state, strict=True) + model.remove_weight_norm() + model = model.eval().to(device) + print(f"| Loaded model parameters from {checkpoint_path}.") + print(f"| HifiGAN device: {device}.") + return model, config, device + + +total_time = 0 + + +@register_vocoder +class HifiGAN(PWG): + def __init__(self): + base_dir = hparams['vocoder_ckpt'] + config_path = f'{base_dir}/config.yaml' + if os.path.exists(config_path): + ckpt = sorted(glob.glob(f'{base_dir}/model_ckpt_steps_*.ckpt'), key= + lambda x: int(re.findall(f'{base_dir}/model_ckpt_steps_(\d+).ckpt', x)[0]))[-1] + print('| load HifiGAN: ', ckpt) + self.model, self.config, self.device = load_model(config_path=config_path, checkpoint_path=ckpt) + else: + config_path = f'{base_dir}/config.json' + ckpt = f'{base_dir}/generator_v1' + if os.path.exists(config_path): + self.model, self.config, self.device = 
load_model(config_path=config_path, checkpoint_path=ckpt) + + def spec2wav(self, mel, **kwargs): + device = self.device + with torch.no_grad(): + c = torch.FloatTensor(mel).unsqueeze(0).transpose(2, 1).to(device) + with utils.Timer('hifigan', print_time=hparams['profile_infer']): + f0 = kwargs.get('f0') + if f0 is not None and hparams.get('use_nsf'): + f0 = torch.FloatTensor(f0[None, :]).to(device) + y = self.model(c, f0).view(-1) + else: + y = self.model(c).view(-1) + wav_out = y.cpu().numpy() + if hparams.get('vocoder_denoise_c', 0.0) > 0: + wav_out = denoise(wav_out, v=hparams['vocoder_denoise_c']) + return wav_out + + # @staticmethod + # def wav2spec(wav_fn, **kwargs): + # wav, _ = librosa.core.load(wav_fn, sr=hparams['audio_sample_rate']) + # wav_torch = torch.FloatTensor(wav)[None, :] + # mel = mel_spectrogram(wav_torch, hparams).numpy()[0] + # return wav, mel.T diff --git a/NeuralSeq/vocoders/pwg.py b/NeuralSeq/vocoders/pwg.py new file mode 100644 index 0000000000000000000000000000000000000000..ca9b6891ab2ba5cb413eeca97a41534e5db129d5 --- /dev/null +++ b/NeuralSeq/vocoders/pwg.py @@ -0,0 +1,137 @@ +import glob +import re +import librosa +import torch +import yaml +from sklearn.preprocessing import StandardScaler +from torch import nn +from modules.parallel_wavegan.models import ParallelWaveGANGenerator +from modules.parallel_wavegan.utils import read_hdf5 +from utils.hparams import hparams +from utils.pitch_utils import f0_to_coarse +from vocoders.base_vocoder import BaseVocoder, register_vocoder +import numpy as np + + +def load_pwg_model(config_path, checkpoint_path, stats_path): + # load config + with open(config_path) as f: + config = yaml.load(f, Loader=yaml.Loader) + + # setup + if torch.cuda.is_available(): + device = torch.device("cuda") + else: + device = torch.device("cpu") + model = ParallelWaveGANGenerator(**config["generator_params"]) + + ckpt_dict = torch.load(checkpoint_path, map_location="cpu") + if 'state_dict' not in ckpt_dict: # official vocoder + model.load_state_dict(torch.load(checkpoint_path, map_location="cpu")["model"]["generator"]) + scaler = StandardScaler() + if config["format"] == "hdf5": + scaler.mean_ = read_hdf5(stats_path, "mean") + scaler.scale_ = read_hdf5(stats_path, "scale") + elif config["format"] == "npy": + scaler.mean_ = np.load(stats_path)[0] + scaler.scale_ = np.load(stats_path)[1] + else: + raise ValueError("support only hdf5 or npy format.") + else: # custom PWG vocoder + fake_task = nn.Module() + fake_task.model_gen = model + fake_task.load_state_dict(torch.load(checkpoint_path, map_location="cpu")["state_dict"], strict=False) + scaler = None + + model.remove_weight_norm() + model = model.eval().to(device) + print(f"| Loaded model parameters from {checkpoint_path}.") + print(f"| PWG device: {device}.") + return model, scaler, config, device + + +@register_vocoder +class PWG(BaseVocoder): + def __init__(self): + if hparams['vocoder_ckpt'] == '': # load LJSpeech PWG pretrained model + base_dir = 'wavegan_pretrained' + ckpts = glob.glob(f'{base_dir}/checkpoint-*steps.pkl') + ckpt = sorted(ckpts, key= + lambda x: int(re.findall(f'{base_dir}/checkpoint-(\d+)steps.pkl', x)[0]))[-1] + config_path = f'{base_dir}/config.yaml' + print('| load PWG: ', ckpt) + self.model, self.scaler, self.config, self.device = load_pwg_model( + config_path=config_path, + checkpoint_path=ckpt, + stats_path=f'{base_dir}/stats.h5', + ) + else: + base_dir = hparams['vocoder_ckpt'] + print(base_dir) + config_path = f'{base_dir}/config.yaml' + ckpt = 
sorted(glob.glob(f'{base_dir}/model_ckpt_steps_*.ckpt'), key= + lambda x: int(re.findall(f'{base_dir}/model_ckpt_steps_(\d+).ckpt', x)[0]))[-1] + print('| load PWG: ', ckpt) + self.scaler = None + self.model, _, self.config, self.device = load_pwg_model( + config_path=config_path, + checkpoint_path=ckpt, + stats_path=f'{base_dir}/stats.h5', + ) + + def spec2wav(self, mel, **kwargs): + # start generation + config = self.config + device = self.device + pad_size = (config["generator_params"]["aux_context_window"], + config["generator_params"]["aux_context_window"]) + c = mel + if self.scaler is not None: + c = self.scaler.transform(c) + + with torch.no_grad(): + z = torch.randn(1, 1, c.shape[0] * config["hop_size"]).to(device) + c = np.pad(c, (pad_size, (0, 0)), "edge") + c = torch.FloatTensor(c).unsqueeze(0).transpose(2, 1).to(device) + p = kwargs.get('f0') + if p is not None: + p = f0_to_coarse(p) + p = np.pad(p, (pad_size,), "edge") + p = torch.LongTensor(p[None, :]).to(device) + y = self.model(z, c, p).view(-1) + wav_out = y.cpu().numpy() + return wav_out + + @staticmethod + def wav2spec(wav_fn, return_linear=False): + from data_gen.tts.data_gen_utils import process_utterance + res = process_utterance( + wav_fn, fft_size=hparams['fft_size'], + hop_size=hparams['hop_size'], + win_length=hparams['win_size'], + num_mels=hparams['audio_num_mel_bins'], + fmin=hparams['fmin'], + fmax=hparams['fmax'], + sample_rate=hparams['audio_sample_rate'], + loud_norm=hparams['loud_norm'], + min_level_db=hparams['min_level_db'], + return_linear=return_linear, vocoder='pwg', eps=float(hparams.get('wav2spec_eps', 1e-10))) + if return_linear: + return res[0], res[1].T, res[2].T # [T, 80], [T, n_fft] + else: + return res[0], res[1].T + + @staticmethod + def wav2mfcc(wav_fn): + fft_size = hparams['fft_size'] + hop_size = hparams['hop_size'] + win_length = hparams['win_size'] + sample_rate = hparams['audio_sample_rate'] + wav, _ = librosa.core.load(wav_fn, sr=sample_rate) + mfcc = librosa.feature.mfcc(y=wav, sr=sample_rate, n_mfcc=13, + n_fft=fft_size, hop_length=hop_size, + win_length=win_length, pad_mode="constant", power=1.0) + mfcc_delta = librosa.feature.delta(mfcc, order=1) + mfcc_delta_delta = librosa.feature.delta(mfcc, order=2) + mfcc = np.concatenate([mfcc, mfcc_delta, mfcc_delta_delta]).T + return mfcc diff --git a/NeuralSeq/vocoders/vocoder_utils.py b/NeuralSeq/vocoders/vocoder_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..db5d5ca1765928e4b047db04435a8a39b52592ca --- /dev/null +++ b/NeuralSeq/vocoders/vocoder_utils.py @@ -0,0 +1,15 @@ +import librosa + +from utils.hparams import hparams +import numpy as np + + +def denoise(wav, v=0.1): + spec = librosa.stft(y=wav, n_fft=hparams['fft_size'], hop_length=hparams['hop_size'], + win_length=hparams['win_size'], pad_mode='constant') + spec_m = np.abs(spec) + spec_m = np.clip(spec_m - v, a_min=0, a_max=None) + spec_a = np.angle(spec) + + return librosa.istft(spec_m * np.exp(1j * spec_a), hop_length=hparams['hop_size'], + win_length=hparams['win_size']) diff --git a/text_to_speech/TTS_binding.py b/text_to_speech/TTS_binding.py new file mode 100644 index 0000000000000000000000000000000000000000..f90c581acc887b69619328abbfb6aa4b9f124647 --- /dev/null +++ b/text_to_speech/TTS_binding.py @@ -0,0 +1,126 @@ +import torch +import os + + +class TTSInference: + def __init__(self, device=None): + print("Initializing TTS model to %s" % device) + from .tasks.tts.tts_utils import load_data_preprocessor + from .utils.commons.hparams import 
set_hparams + if device is None: + device = 'cuda' if torch.cuda.is_available() else 'cpu' + self.hparams = set_hparams("text_to_speech/checkpoints/ljspeech/ps_adv_baseline/config.yaml") + self.device = device + self.data_dir = 'text_to_speech/checkpoints/ljspeech/data_info' + self.preprocessor, self.preprocess_args = load_data_preprocessor() + self.ph_encoder, self.word_encoder = self.preprocessor.load_dict(self.data_dir) + self.spk_map = self.preprocessor.load_spk_map(self.data_dir) + self.model = self.build_model() + self.model.eval() + self.model.to(self.device) + self.vocoder = self.build_vocoder() + self.vocoder.eval() + self.vocoder.to(self.device) + print("TTS loaded!") + + def build_model(self): + from .utils.commons.ckpt_utils import load_ckpt + from .modules.tts.portaspeech.portaspeech import PortaSpeech + + ph_dict_size = len(self.ph_encoder) + word_dict_size = len(self.word_encoder) + model = PortaSpeech(ph_dict_size, word_dict_size, self.hparams) + load_ckpt(model, 'text_to_speech/checkpoints/ljspeech/ps_adv_baseline', 'model') + model.to(self.device) + with torch.no_grad(): + model.store_inverse_all() + model.eval() + return model + + def forward_model(self, inp): + sample = self.input_to_batch(inp) + with torch.no_grad(): + output = self.model( + sample['txt_tokens'], + sample['word_tokens'], + ph2word=sample['ph2word'], + word_len=sample['word_lengths'].max(), + infer=True, + forward_post_glow=True, + spk_id=sample.get('spk_ids') + ) + mel_out = output['mel_out'] + wav_out = self.run_vocoder(mel_out) + wav_out = wav_out.cpu().numpy() + return wav_out[0] + + def build_vocoder(self): + from .utils.commons.hparams import set_hparams + from .modules.vocoder.hifigan.hifigan import HifiGanGenerator + from .utils.commons.ckpt_utils import load_ckpt + base_dir = 'text_to_speech/checkpoints/hifi_lj' + config_path = f'{base_dir}/config.yaml' + config = set_hparams(config_path, global_hparams=False) + vocoder = HifiGanGenerator(config) + load_ckpt(vocoder, base_dir, 'model_gen') + return vocoder + + def run_vocoder(self, c): + c = c.transpose(2, 1) + y = self.vocoder(c)[:, 0] + return y + + def preprocess_input(self, inp): + """ + + :param inp: {'text': str, 'item_name': (str, optional), 'spk_name': (str, optional)} + :return: + """ + preprocessor, preprocess_args = self.preprocessor, self.preprocess_args + text_raw = inp['text'] + item_name = inp.get('item_name', '') + spk_name = inp.get('spk_name', '') + ph, txt, word, ph2word, ph_gb_word = preprocessor.txt_to_ph( + preprocessor.txt_processor, text_raw, preprocess_args) + word_token = self.word_encoder.encode(word) + ph_token = self.ph_encoder.encode(ph) + spk_id = self.spk_map[spk_name] + item = {'item_name': item_name, 'text': txt, 'ph': ph, 'spk_id': spk_id, + 'ph_token': ph_token, 'word_token': word_token, 'ph2word': ph2word, + 'ph_words': ph_gb_word, 'words': word} + item['ph_len'] = len(item['ph_token']) + return item + + def input_to_batch(self, item): + item_names = [item['item_name']] + text = [item['text']] + ph = [item['ph']] + txt_tokens = torch.LongTensor(item['ph_token'])[None, :].to(self.device) + txt_lengths = torch.LongTensor([txt_tokens.shape[1]]).to(self.device) + word_tokens = torch.LongTensor(item['word_token'])[None, :].to(self.device) + word_lengths = torch.LongTensor([word_tokens.shape[1]]).to(self.device) + ph2word = torch.LongTensor(item['ph2word'])[None, :].to(self.device) + spk_ids = torch.LongTensor([item['spk_id']]).to(self.device) + batch = { + 'item_name': item_names, + 'text': text, + 'ph': ph, 
+ 'txt_tokens': txt_tokens, + 'txt_lengths': txt_lengths, + 'word_tokens': word_tokens, + 'word_lengths': word_lengths, + 'ph2word': ph2word, + 'spk_ids': spk_ids, + } + return batch + + def postprocess_output(self, output): + return output + + def infer_once(self, inp): + inp = self.preprocess_input(inp) + output = self.forward_model(inp) + output = self.postprocess_output(output) + return output + + diff --git a/text_to_speech/__init__.py b/text_to_speech/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/text_to_speech/__pycache__/TTS_binding.cpython-38.pyc b/text_to_speech/__pycache__/TTS_binding.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2d5a8cf9012aab59637411a8070d961ed4071875 Binary files /dev/null and b/text_to_speech/__pycache__/TTS_binding.cpython-38.pyc differ diff --git a/text_to_speech/__pycache__/__init__.cpython-38.pyc b/text_to_speech/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a4f392f3844cdb8d7225c197b95aff59d20e0c7d Binary files /dev/null and b/text_to_speech/__pycache__/__init__.cpython-38.pyc differ diff --git a/text_to_speech/checkpoints/hifi_lj/.DS_Store b/text_to_speech/checkpoints/hifi_lj/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..78b73d2e40983fa4dc1c45102595e79dc458b322 Binary files /dev/null and b/text_to_speech/checkpoints/hifi_lj/.DS_Store differ diff --git a/text_to_speech/checkpoints/hifi_lj/config.yaml b/text_to_speech/checkpoints/hifi_lj/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..36c0581e2e5e8b0d2ed443450fe52a1d31159a8d --- /dev/null +++ b/text_to_speech/checkpoints/hifi_lj/config.yaml @@ -0,0 +1,207 @@ +accumulate_grad_batches: 1 +adam_b1: 0.8 +adam_b2: 0.99 +amp: false +audio_num_mel_bins: 80 +audio_sample_rate: 22050 +base_config: +- configs/tts/hifigan.yaml +- configs/tts/lj/base_mel2wav.yaml +binarization_args: + shuffle: false + trim_eos_bos: false + trim_sil: false + with_align: false + with_f0: true + with_f0cwt: false + with_linear: false + with_spk_embed: false + with_txt: true + with_wav: true +binarizer_cls: data_gen.tts.base_binarizer.BaseBinarizer +binary_data_dir: data/binary/ljspeech_wav +check_val_every_n_epoch: 10 +clip_grad_norm: 1 +clip_grad_value: 0 +debug: false +dec_ffn_kernel_size: 9 +dec_layers: 4 +dict_dir: '' +disc_start_steps: 40000 +discriminator_grad_norm: 1 +discriminator_optimizer_params: + eps: 1.0e-06 + lr: 0.0002 + weight_decay: 0.0 +discriminator_params: + bias: true + conv_channels: 64 + in_channels: 1 + kernel_size: 3 + layers: 10 + nonlinear_activation: LeakyReLU + nonlinear_activation_params: + negative_slope: 0.2 + out_channels: 1 + use_weight_norm: true +discriminator_scheduler_params: + gamma: 0.999 + step_size: 600 +dropout: 0.1 +ds_workers: 1 +enc_ffn_kernel_size: 9 +enc_layers: 4 +endless_ds: true +ffn_act: gelu +ffn_padding: SAME +fft_size: 1024 +fm_loss: false +fmax: 7600 +fmin: 80 +frames_multiple: 1 +gen_dir_name: '' +generator_grad_norm: 10 +generator_optimizer_params: + eps: 1.0e-06 + lr: 0.0002 + weight_decay: 0.0 +generator_params: + aux_channels: 80 + aux_context_window: 0 + dropout: 0.0 + gate_channels: 128 + in_channels: 1 + kernel_size: 3 + layers: 30 + out_channels: 1 + residual_channels: 64 + skip_channels: 64 + stacks: 3 + upsample_net: ConvInUpsampleNetwork + upsample_params: + upsample_scales: + - 4 + - 4 + - 4 + - 4 + use_nsf: false + 
use_pitch_embed: false + use_weight_norm: true +generator_scheduler_params: + gamma: 0.999 + step_size: 600 +griffin_lim_iters: 60 +hidden_size: 256 +hop_size: 256 +infer: false +lambda_adv: 4.0 +lambda_mel: 45.0 +load_ckpt: '' +loud_norm: false +lr: 2.0 +max_epochs: 1000 +max_eval_sentences: 1 +max_eval_tokens: 60000 +max_frames: 1548 +max_input_tokens: 1550 +max_samples: 8192 +max_sentences: 24 +max_tokens: 30000 +max_updates: 3000000 +mel_vmax: 1.5 +mel_vmin: -6 +min_level_db: -100 +num_ckpt_keep: 3 +num_heads: 2 +num_mels: 80 +num_sanity_val_steps: 5 +num_spk: 1 +optimizer_adam_beta1: 0.9 +optimizer_adam_beta2: 0.98 +out_wav_norm: false +pitch_extractor: parselmouth +pre_align_args: + allow_no_txt: false + denoise: false + forced_align: mfa + sox_resample: false + trim_sil: false + txt_processor: en + use_tone: true +pre_align_cls: '' +print_nan_grads: false +processed_data_dir: data/processed/ljspeech +profile_infer: false +raw_data_dir: data/raw/LJSpeech-1.1 +ref_level_db: 20 +rerun_gen: true +resblock: '1' +resblock_dilation_sizes: +- - 1 + - 3 + - 5 +- - 1 + - 3 + - 5 +- - 1 + - 3 + - 5 +resblock_kernel_sizes: +- 3 +- 7 +- 11 +reset_phone_dict: true +resume_from_checkpoint: 0 +sampling_rate: 22050 +save_best: true +save_codes: [] +save_f0: false +save_gt: true +seed: 1234 +sort_by_len: true +stft_loss_params: + fft_sizes: + - 1024 + - 2048 + - 512 + hop_sizes: + - 120 + - 240 + - 50 + win_lengths: + - 600 + - 1200 + - 240 + window: hann_window +stop_token_weight: 5.0 +task_cls: tasks.vocoder.hifigan.HifiGanTask +tb_log_interval: 100 +test_input_dir: '' +test_num: 100 +test_set_name: test +train_set_name: train +upsample_initial_channel: 512 +upsample_kernel_sizes: +- 16 +- 16 +- 4 +- 4 +upsample_rates: +- 8 +- 8 +- 2 +- 2 +use_mel_loss: false +use_pitch_embed: false +val_check_interval: 2000 +valid_monitor_key: val_loss +valid_monitor_mode: min +valid_set_name: valid +vocoder: pwg +vocoder_ckpt: '' +warmup_updates: 8000 +weight_decay: 0 +win_length: null +win_size: 1024 +window: hann +work_dir: checkpoints/0414_hifi_lj_1 diff --git a/text_to_speech/checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt b/text_to_speech/checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt new file mode 100644 index 0000000000000000000000000000000000000000..e13d7f8029969fc5806b22b52dd402d2cd619f08 --- /dev/null +++ b/text_to_speech/checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3def8d31347c248a0416fc41a043f3b3d8a9044ca9abeb32099818a056f057a2 +size 1016199247 diff --git a/text_to_speech/checkpoints/ljspeech/data_info/phone_set.json b/text_to_speech/checkpoints/ljspeech/data_info/phone_set.json new file mode 100644 index 0000000000000000000000000000000000000000..62bc3f6943c3d69a8060b9bd6a9e8d9ce93d5bdd --- /dev/null +++ b/text_to_speech/checkpoints/ljspeech/data_info/phone_set.json @@ -0,0 +1 @@ +["!", ",", ".", ":", ";", "", "", "?", "AA0", "AA1", "AA2", "AE0", "AE1", "AE2", "AH0", "AH1", "AH2", "AO0", "AO1", "AO2", "AW0", "AW1", "AW2", "AY0", "AY1", "AY2", "B", "CH", "D", "DH", "EH0", "EH1", "EH2", "ER0", "ER1", "ER2", "EY0", "EY1", "EY2", "F", "G", "HH", "IH0", "IH1", "IH2", "IY0", "IY1", "IY2", "JH", "K", "L", "M", "N", "NG", "OW0", "OW1", "OW2", "OY0", "OY1", "OY2", "P", "R", "S", "SH", "T", "TH", "UH0", "UH1", "UH2", "UW0", "UW1", "UW2", "V", "W", "Y", "Z", "ZH"] \ No newline at end of file diff --git a/text_to_speech/checkpoints/ljspeech/data_info/spk_map.json b/text_to_speech/checkpoints/ljspeech/data_info/spk_map.json new file 
mode 100644 index 0000000000000000000000000000000000000000..161106099d8879092aaad3305fec7d371597e168 --- /dev/null +++ b/text_to_speech/checkpoints/ljspeech/data_info/spk_map.json @@ -0,0 +1 @@ +{"": 0} \ No newline at end of file diff --git a/text_to_speech/checkpoints/ljspeech/data_info/word_set.json b/text_to_speech/checkpoints/ljspeech/data_info/word_set.json new file mode 100644 index 0000000000000000000000000000000000000000..2351fad49bbe42f14c19490553b02d7efe780769 --- /dev/null +++ b/text_to_speech/checkpoints/ljspeech/data_info/word_set.json @@ -0,0 +1 @@ +["!", ",", ".", ":", ";", "", "", "?", "a", "abandon", "abandoned", "abandonment", "abash", "abbey", "abduction", "abetting", "abeyance", "abiding", "abilities", "ability", "abject", "able", "ablution", "ablutions", "ably", "abode", "abolished", "abolition", "abominable", "abound", "about", "above", "abraham", "abrahams", "abroad", "abruptly", "abscond", "absconded", "absence", "absent", "absolute", "absolutely", "absorb", "absorbed", "absorbing", "absorbs", "absorption", "abstained", "abstracted", "abstraction", "abstractions", "absurdity", "abundance", "abundant", "abundantly", "abuse", "abused", "abuses", "academy", "accent", "accentuated", "accept", "acceptance", "acceptances", "accepted", "accepting", "access", "accession", "accessory", "accident", "accidentally", "accidents", "accommodate", "accommodated", "accommodating", "accommodation", "accommodations", "accompanied", "accompany", "accompanying", "accomplice", "accomplices", "accomplish", "accomplished", "accomplishing", "accomplishment", "accord", "accordance", "accorded", "according", "accordingly", "accosted", "account", "accountant", "accounted", "accounts", "accumulate", "accumulated", "accumulation", "accuracy", "accurate", "accurately", "accusation", "accused", "accustomed", "achieve", "achieved", "achievements", "acid", "acknowledged", "acquaintance", "acquaintances", "acquaintanceship", "acquainted", "acquire", "acquired", "acquiring", "acquittal", "acquitted", "acres", "across", "act", "acted", "acting", "action", "actions", "active", "actively", "activities", "activity", "actor", "actors", "acts", "actual", "actually", "acutely", "ad", "adam", "adams", "adaptation", "adaptations", "adapted", "adaptive", "add", "added", "addicted", "adding", "addison", "addition", "additional", "additions", "address", "addressed", "addresses", "addressing", "adduced", "adept", "adepts", "adequacy", "adequate", "adequately", "adhered", "adherence", "adhesive", "adjacent", "adjoining", "adjudged", "adjusted", "adjustment", "adlai", "administer", "administered", "administering", "administration", "administrative", "admirable", "admirably", "admiral", "admiralty", "admired", "admission", "admissions", "admit", "admits", "admitted", "admitting", "admonition", "adolph", "adolphus", "adopt", "adopted", "adoption", "adult", "adults", "advance", "advanced", "advancement", "advances", "advantage", "advantages", "adventure", "adventures", "adventurous", "adverse", "adversity", "advert", "advertised", "advertisement", "advertising", "advice", "advise", "advised", "advising", "advocated", "aerial", "affair", "affairs", "affected", "affecting", "affection", "affections", "affidavit", "affinities", "affirmed", "affirms", "affixed", "afflicted", "afford", "afforded", "affording", "aforesaid", "afraid", "after", "afternoon", "afternoons", "afterward", "afterwards", "again", "against", "agar", "age", "aged", "agencies", "agency", "agencys", "agent", "agents", "ages", "aggravated", 
"aggravation", "aggregate", "aggregations", "aggressive", "aggrieved", "agitate", "agitated", "agitation", "ago", "agony", "agree", "agreed", "agreement", "agreements", "agrees", "agriculture", "ahead", "aid", "aiding", "aim", "aimed", "aiming", "aims", "air", "aircraft", "aired", "aires", "airing", "airplane", "airport", "airway", "airy", "aisle", "aisles", "akermans", "akin", "alan", "alarm", "alarmed", "albans", "albert", "album", "alderman", "aldermen", "aldermens", "aldus", "ale", "alek", "alert", "alerted", "alertness", "alexander", "alfred", "alias", "aliases", "alighted", "alighting", "aligning", "alike", "alimentary", "alive", "all", "allegations", "alleged", "allegiance", "alley", "alliance", "allied", "allington", "allnutt", "allotment", "allotted", "allow", "allowance", "allowances", "allowed", "allowing", "alluded", "almighty", "almost", "alms", "alone", "along", "alongside", "aloud", "already", "also", "alter", "alterations", "altered", "altering", "alternate", "alternated", "alternative", "altgens", "although", "altogether", "always", "am", "amass", "amassed", "amateur", "amateurs", "ambassador", "ambitious", "ameliorated", "ameliorating", "amelioration", "amend", "amended", "amendment", "amendments", "america", "american", "americanized", "americans", "amid", "amidst", "ammonia", "ammunition", "among", "amongst", "amos", "amount", "amounted", "amounting", "amounts", "amphibian", "amphibians", "ample", "amplified", "amputation", "amused", "amusement", "amusing", "an", "anabolism", "analogous", "analysis", "anarchist", "anatomy", "ancestors", "ancestral", "ancient", "ancients", "and", "anderson", "andrews", "angeles", "angelini", "anger", "angle", "angles", "anglesea", "angling", "angry", "animadversion", "animadvert", "animadverted", "animadverting", "animal", "animals", "annals", "annex", "annexe", "announced", "annoy", "annoyance", "annoyances", "annual", "annually", "annum", "anomalous", "another", "anoura", "answer", "answered", "answering", "answers", "antecedents", "anthony", "anti", "anticipated", "anticipating", "anticipation", "anticipations", "antics", "antimony", "antlers", "antonio", "anxiety", "anxious", "anxiously", "any", "anybody", "anyone", "anything", "anywhere", "apace", "apart", "apartment", "apartments", "apathy", "aperture", "apertures", "apiece", "apothecaries", "apothecary", "appalling", "apparatus", "apparel", "appareled", "apparent", "apparently", "appeal", "appealed", "appeals", "appear", "appearance", "appearances", "appeared", "appearing", "appears", "appendix", "appetite", "appleby", "appliances", "application", "applications", "applied", "apply", "appoint", "appointed", "appointment", "apprehended", "apprehension", "apprentice", "apprised", "approach", "approached", "approaches", "approaching", "appropriate", "appropriated", "appropriately", "appropriation", "approval", "approve", "approved", "approver", "approximate", "approximated", "approximately", "april", "apron", "aptitude", "arab", "arabian", "arabic", "arabs", "arachtu", "arbor", "arcade", "arch", "archbishop", "arches", "architect", "architects", "architectural", "architecture", "arduous", "are", "area", "areas", "argue", "argued", "arguing", "argument", "arguments", "arise", "arises", "arising", "aristocracy", "aristocratic", "arm", "armed", "armless", "armpit", "arms", "army", "arnold", "arose", "around", "aroused", "arraigned", "arraignment", "arrange", "arranged", "arrangement", "arrangements", "arrayed", "arrest", "arrested", "arresting", "arrests", "arrival", "arrivals", 
"arrive", "arrived", "arrives", "arriving", "arrogant", "arsenic", "arson", "art", "arteries", "arthur", "article", "articles", "artificial", "artisans", "artist", "artistic", "artistically", "artlessness", "arts", "as", "ascend", "ascendancy", "ascendant", "ascended", "ascent", "ascertain", "ascertained", "ascot", "ashamed", "ashes", "ashley", "ashy", "aside", "ask", "asked", "askern", "asking", "asleep", "aspect", "aspects", "aspirants", "assailant", "assailed", "assassin", "assassinate", "assassinated", "assassinating", "assassination", "assassins", "assault", "assemblage", "assembled", "assembly", "assert", "asserted", "assiduously", "assigned", "assignment", "assignments", "assist", "assistance", "assistant", "assistants", "assisted", "assisting", "assize", "assizes", "associate", "associated", "associates", "association", "associations", "assume", "assumed", "assuming", "assumption", "assurance", "assurances", "assurbanipal", "assure", "assured", "assyrians", "astonished", "asunder", "asylum", "at", "ate", "atlantic", "atmosphere", "atmospheric", "atop", "atrocious", "atrocity", "attach", "attached", "attachment", "attack", "attacked", "attacks", "attainments", "attempt", "attempted", "attempting", "attempts", "attend", "attendance", "attendant", "attendants", "attended", "attending", "attends", "attention", "attentions", "attentive", "attentively", "attest", "attic", "attitude", "attorney", "attorneys", "attract", "attracted", "attracting", "attribute", "attributed", "atwell", "auburn", "audible", "audience", "auditors", "augmented", "augsburg", "august", "auspices", "austin", "australia", "authentic", "authoritative", "authoritatively", "authorities", "authority", "authorize", "authorized", "authors", "autobiographical", "automatic", "automatically", "automation", "automobile", "autopsy", "autumn", "avail", "available", "availed", "avenue", "avenues", "average", "averaged", "averted", "avocation", "avoid", "avoidance", "avoided", "awaited", "awaiting", "awake", "awakened", "awakening", "aware", "awareness", "away", "awful", "awkward", "axe", "axis", "b", "babel", "baby", "babylon", "babylonia", "babylonian", "babylonians", "back", "backboned", "backed", "backfire", "backgammon", "background", "backwards", "bad", "badge", "badly", "badness", "bag", "bags", "bail", "bailey", "bake", "baker", "bakers", "baking", "balance", "balanced", "balances", "balcony", "bald", "ball", "ballad", "ballistics", "balls", "bambridge", "band", "bandied", "baneful", "bank", "banker", "bankers", "bankes", "banking", "bankrupt", "bankruptcy", "banks", "banquets", "baptist", "bar", "barbara", "barbariously", "barbarous", "barber", "barbers", "bare", "barely", "bargaining", "barman", "barnett", "baron", "baronet", "baronial", "barrack", "barrel", "barrett", "barrier", "barriers", "barry", "bars", "bart", "barthelemy", "bartholomew", "base", "based", "basement", "bases", "bashour", "basic", "basically", "basin", "basis", "baskerville", "basket", "basle", "bat", "batch", "batchelor", "bateman", "bates", "bath", "bathed", "bathing", "bathroom", "baths", "batter", "battered", "battle", "baxter", "bay", "be", "beam", "beams", "bean", "bear", "bearing", "bears", "beast", "beasts", "beat", "beaten", "beating", "beaumont", "beautiful", "beauty", "became", "because", "beckley", "become", "becomes", "becoming", "bed", "bedclothes", "bedding", "bedroom", "beds", "bedstead", "bedsteads", "bedtime", "been", "beer", "before", "began", "begged", "begging", "begin", "beginner", "beginning", "beginnings", "begins", "begun", 
"behalf", "behaved", "behaves", "behavior", "behind", "behn", "behold", "being", "beings", "bel", "belgium", "belgrave", "belian", "belief", "beliefs", "believe", "believed", "believes", "bell", "bellingham", "bells", "belmont", "belonged", "belonging", "belongings", "beloved", "below", "belshazzar", "belsize", "belt", "belus", "ben", "benavides", "bench", "benches", "beneath", "beneficial", "benefit", "benefits", "benevolence", "benevolent", "benign", "benjamin", "bennet", "bennett", "bent", "bentham", "bequest", "bequests", "berlin", "bermondsey", "berth", "berwick", "beside", "besides", "besotted", "best", "bestow", "bethesda", "bethlehem", "betrayed", "betraying", "better", "betting", "between", "beverage", "beware", "beyond", "bible", "bibles", "bid", "bidden", "bidwell", "big", "bigamy", "bill", "billings", "billions", "billows", "bills", "billy", "bins", "biological", "biology", "birch", "bird", "birmingham", "birth", "birthday", "biscuit", "bishop", "bishops", "bit", "bitten", "bitter", "bitterly", "bitterness", "bitters", "bitumen", "black", "blackburn", "blacksmith", "blame", "blamed", "blanched", "blank", "blanket", "blasphemous", "bleating", "bledsoe", "bleed", "bleeding", "blemish", "blind", "blinded", "blinked", "block", "blocked", "blocks", "blond", "blood", "blooded", "bloodthirsty", "bloody", "blow", "blowing", "blows", "blue", "board", "boarded", "boarding", "boards", "boast", "boasted", "boat", "boats", "bob", "bodies", "bodily", "bodoni", "body", "bogus", "boiled", "boiling", "boldly", "boldness", "bolster", "bolt", "bolted", "bolts", "bomb", "bona", "bond", "bonded", "bonding", "bonds", "bone", "bones", "bonnet", "bonneted", "bonnets", "book", "books", "booming", "boon", "boot", "booty", "bordeaux", "border", "bordered", "bore", "born", "borne", "borough", "boroughs", "borrowed", "boston", "botany", "both", "bottle", "bottom", "bouck", "bought", "bouhe", "boulevard", "boulogne", "bound", "boundary", "bounds", "bourne", "bousfield", "bow", "bowed", "bowl", "box", "boxes", "boxing", "boy", "boyfriend", "boys", "brace", "bracelet", "bradawls", "bradshaws", "brain", "brains", "branch", "branches", "brass", "brave", "bravery", "breach", "bread", "breadth", "break", "breaker", "breakfast", "breaking", "breaks", "breast", "breath", "breathe", "breathed", "breathing", "bred", "breeches", "brennan", "brennans", "brethren", "brewer", "brewery", "brian", "bribe", "bribery", "brick", "bricklayers", "bricks", "brickwork", "bride", "bridewell", "bridge", "bridges", "brief", "briefly", "briefs", "briggs", "bright", "brilliant", "brilliants", "bring", "bringing", "bringuier", "bristol", "britain", "british", "broad", "broadcast", "broader", "broadly", "broadmoor", "broadsheet", "brochure", "broke", "broken", "bronx", "bronze", "brooks", "brooms", "brother", "brothers", "brought", "brown", "brownrigg", "browns", "brows", "bruce", "bruised", "brunt", "brutal", "brutality", "brutalized", "brutally", "brute", "bubble", "bubbletop", "buckingham", "budget", "buell", "buenos", "buff", "build", "builder", "builders", "building", "buildings", "built", "bulge", "bulk", "bulky", "bullet", "bulletproof", "bullets", "bullion", "bulls", "bumble", "buncher", "bunchers", "bundle", "bungling", "buoyed", "buranelli", "burchell", "burdens", "bureau", "bureaucracy", "bureaus", "burgess", "burglar", "burglaries", "burglars", "burglary", "burial", "buried", "buries", "burke", "burking", "burkley", "burn", "burned", "burnett", "burning", "burnt", "burst", "burthen", "bury", "bus", "buses", "bushes", 
"bushy", "busily", "business", "businessmen", "busy", "but", "butcher", "butler", "butlers", "butt", "butter", "button", "buxton", "buxtons", "buy", "buying", "by", "bye", "bystanders", "c", "cab", "cabin", "cabinet", "cabins", "cabman", "cadigan", "caducibranch", "cage", "cajoled", "cakes", "calais", "calamities", "calamitous", "calcraft", "calcrafts", "calculated", "calculating", "calculation", "calendar", "calendars", "caliber", "call", "callaway", "called", "callers", "calligraphy", "calling", "callous", "callousness", "calm", "calmly", "camberwell", "came", "cameo", "camera", "campaign", "campbell", "campbells", "can", "canada", "canal", "canceled", "candidate", "candles", "canning", "cannings", "cannon", "cannot", "canonicals", "cant", "canterbury", "cap", "capabilities", "capability", "capable", "capacities", "capacity", "capias", "capital", "capitalism", "capitalist", "capitally", "caps", "capstan", "capt", "captain", "captains", "captive", "captors", "capture", "captured", "car", "caravans", "carbine", "carbohydrate", "carbohydrates", "carbon", "carbonic", "carcano", "card", "cardboard", "cardiac", "carding", "cards", "care", "cared", "career", "careful", "carefully", "careless", "carelessly", "carelessness", "cargo", "carlisle", "carlos", "carousing", "carpenter", "carpenters", "carpet", "carpets", "carriage", "carriages", "carrico", "carried", "carro", "carry", "carrying", "cars", "cart", "carter", "cartilage", "carton", "cartons", "cartridge", "cartridges", "carver", "case", "caseload", "casements", "cases", "cash", "cashed", "cashman", "casket", "caslon", "caslons", "caspar", "caspars", "cast", "casting", "castle", "castlereagh", "castro", "castros", "casually", "catabolic", "catabolism", "catastrophe", "catastrophes", "catch", "catching", "categories", "category", "catherine", "catholics", "catnach", "cato", "cattle", "caught", "caulked", "cause", "caused", "causes", "causing", "caustic", "cautioned", "cautiously", "cavern", "cavities", "cavity", "caxtons", "cease", "ceased", "ceiling", "ceilings", "celebrated", "cell", "cellar", "cells", "cellular", "cellulose", "cement", "center", "centered", "centers", "centimeter", "centimeters", "central", "cents", "centuries", "century", "centurys", "ceremonial", "ceremonials", "ceremony", "certain", "certainly", "certainty", "certificate", "certificates", "certified", "certify", "cetera", "chaff", "chain", "chained", "chains", "chair", "chairman", "chalked", "challenge", "chamber", "chambering", "chambers", "champagne", "champions", "chance", "chancery", "chances", "chandler", "chandlers", "change", "changed", "changes", "channel", "chaos", "chapel", "chapels", "chaplain", "chaplains", "chapter", "chapters", "character", "characteristic", "characteristics", "characterize", "characterized", "characterless", "characters", "charge", "charged", "charges", "charging", "charitable", "charities", "charity", "charles", "charlotte", "charter", "chartists", "chase", "chatham", "chats", "cheap", "cheapest", "cheapside", "check", "checked", "checking", "checklist", "checkpoint", "checks", "cheek", "cheeked", "cheer", "cheered", "cheerful", "cheerless", "cheers", "cheese", "chemical", "chemists", "cheque", "cheques", "cheshire", "chest", "chester", "chevaux", "chicago", "chicanery", "chief", "chiefly", "chiefs", "child", "childish", "childless", "children", "chill", "chilly", "chimney", "chimneys", "chin", "chinamen", "chinese", "chisel", "chiswick", "chlorophyll", "choice", "cholera", "choose", "chose", "chosen", "christian", "christmas", 
"christs", "chronicle", "chronicles", "chubbs", "chummage", "church", "cia", "cigar", "cigarettes", "cigars", "circle", "circles", "circuit", "circular", "circulars", "circulate", "circulated", "circulating", "circulation", "circulatory", "circumference", "circumstance", "circumstances", "circumstantial", "cissian", "cistern", "cited", "cities", "citizen", "citizens", "citizenship", "city", "citys", "civil", "civilian", "civilization", "clad", "claim", "claimed", "claims", "clamp", "clamps", "clanking", "clarence", "clarify", "clarifying", "clark", "clasped", "clasps", "class", "classed", "classes", "classics", "classification", "classified", "classify", "clause", "clay", "clean", "cleaning", "cleanliness", "cleansed", "clear", "cleared", "clearly", "clearness", "clemency", "clench", "clenched", "clergyman", "clergymen", "clerical", "clerk", "clerkenwell", "clerks", "clever", "cleverly", "click", "clients", "cliff", "clifton", "climb", "climbed", "climbing", "clinging", "clings", "clinton", "clipboard", "clipperton", "clipping", "cloak", "clock", "close", "closed", "closely", "closer", "closest", "closing", "cloth", "clothes", "clothing", "club", "clubs", "clue", "clumsy", "clun", "co", "coach", "coachman", "coadjutor", "coal", "coals", "coarse", "coast", "coat", "cobbett", "cobham", "cock", "cocked", "code", "codes", "coffee", "coffin", "coffins", "coiled", "coin", "coincide", "coiners", "coining", "colchicum", "cold", "coldbath", "cole", "coleman", "coles", "collapse", "collapsed", "collar", "collared", "colleagues", "collected", "collection", "collections", "collective", "collector", "college", "collegians", "collins", "collusion", "cologne", "colonel", "colonies", "colony", "color", "colorado", "colored", "coloring", "colors", "colossal", "column", "columns", "combatants", "combination", "combined", "combustible", "come", "comely", "comer", "comes", "comfort", "comfortable", "comforts", "coming", "command", "commanded", "commander", "commanding", "commence", "commenced", "commencement", "commencing", "commendable", "commendation", "commented", "comments", "commerce", "commercial", "commercialism", "commission", "commissioner", "commissioners", "commissions", "commit", "commitment", "committal", "committals", "committed", "committee", "committees", "committing", "commodious", "common", "commonest", "commonly", "commonplace", "commons", "commotion", "communicate", "communicated", "communicating", "communication", "communications", "communion", "communism", "communist", "communities", "community", "commutation", "commuted", "companies", "companion", "companions", "companionship", "company", "companys", "comparable", "comparative", "comparatively", "compare", "compared", "comparing", "comparison", "comparisons", "compartment", "compass", "compassed", "compassion", "compatible", "compel", "compelled", "compelling", "compels", "compensate", "competent", "competition", "competitors", "complacency", "complain", "complained", "complaint", "complaints", "complete", "completed", "completely", "completing", "completion", "complex", "complexion", "compliance", "complicated", "complicity", "comply", "composed", "composites", "composition", "compound", "compounds", "comprehensive", "compression", "comprised", "comprising", "compromised", "compter", "compters", "compulsory", "comrades", "conceal", "concealed", "conceded", "conceit", "conceivable", "conceive", "conceived", "concentrate", "concentration", "concept", "conception", "concern", "concerned", "concerning", "concerns", "concession", 
"conclude", "concluded", "concludes", "concluding", "conclusion", "conclusions", "conclusively", "concomitant", "concomitants", "concourse", "concrete", "concurrence", "condemnation", "condemned", "condition", "conditions", "conduced", "conducive", "conduct", "conducted", "conducting", "confederate", "confederates", "conference", "conferred", "confess", "confessed", "confessedly", "confession", "confessions", "confided", "confidence", "confident", "confidential", "confidently", "confiding", "configuration", "confine", "confined", "confinement", "confirm", "confirmed", "conflict", "conflicting", "conflicts", "conformity", "confronted", "confuse", "confused", "confusion", "congregate", "congregated", "congregation", "congress", "conjunction", "connally", "connallys", "connect", "connected", "connection", "connived", "conquering", "conscience", "conscious", "consciousness", "consecutive", "consented", "consequence", "consequences", "consequent", "consequently", "conservation", "consider", "considerable", "considerably", "considerate", "consideration", "considerations", "considered", "considering", "considers", "consign", "consist", "consisted", "consistent", "consistently", "consisting", "consists", "consolation", "consolatory", "console", "consolidate", "consolidated", "consolidation", "consoling", "consols", "consort", "conspiracy", "conspirators", "constable", "constables", "constant", "constantly", "consternation", "constituent", "constituents", "constitute", "constituted", "constitutes", "constitution", "constitutional", "construct", "constructed", "construction", "constructive", "construed", "consul", "consultants", "consulted", "consumed", "consumer", "consummate", "contact", "contacts", "contagion", "contagious", "contain", "contained", "containing", "contains", "contaminating", "contamination", "conte", "contemplated", "contemporaneous", "contemporaries", "contemporary", "contempt", "contemptuous", "contend", "contended", "content", "contentedly", "contents", "contested", "context", "continent", "continental", "contingency", "contingents", "continually", "continue", "continued", "continues", "continuing", "continuous", "continuously", "contract", "contracted", "contractile", "contraction", "contractors", "contracts", "contrary", "contrast", "contrasting", "contrasts", "contravention", "contributed", "contributes", "contribution", "contrite", "contrition", "contrivance", "contriver", "contriving", "control", "controlled", "controlling", "controversial", "controversy", "convenience", "convenient", "conveniently", "conventionality", "conversation", "converse", "conversed", "conversing", "convert", "convertible", "convey", "conveyance", "conveyed", "conveying", "convict", "convicted", "conviction", "convictions", "convicts", "convinced", "convinces", "convincing", "convulsions", "convulsive", "convulsively", "cook", "cooked", "cooking", "cooks", "cool", "coolly", "coolness", "cooperate", "cooperating", "cooperation", "coordinate", "coordinated", "coordinating", "coordination", "cop", "cope", "copenhagen", "copes", "copied", "copies", "copper", "copy", "cord", "corn", "corner", "corners", "cornhill", "corporate", "corporation", "corporations", "corporeal", "corps", "corpse", "correct", "correction", "correctly", "correctness", "correspondence", "corroborated", "corroboration", "corrupt", "corrupted", "corruption", "cost", "costing", "costliness", "costly", "costs", "cot", "cottage", "cotton", "cough", "coughing", "could", "couldnt", "council", "councilmen", "councilors", "counsel", 
"counselor", "counsels", "count", "counted", "countenance", "counteract", "counteracted", "countered", "counterfeit", "counterfeiting", "counties", "countries", "country", "counts", "county", "couple", "coupled", "couples", "coupon", "courage", "courageous", "courier", "course", "court", "courts", "courvoisier", "courvoisiers", "coventry", "cover", "covered", "covering", "coveted", "cowardly", "cracked", "craft", "crafts", "craftsmen", "craig", "craigs", "crammed", "crane", "crank", "cranks", "crape", "crater", "crawford", "crawled", "craze", "create", "created", "creating", "creation", "creature", "creatures", "credible", "credit", "creditable", "creditors", "credulity", "crept", "crescent", "crevice", "crew", "crib", "cribbage", "cried", "cries", "crime", "crimes", "criminal", "criminality", "criminals", "crippled", "crisis", "criteria", "critic", "critical", "criticism", "crop", "cropped", "cross", "crossed", "crosshairs", "crossing", "crowd", "crowded", "crowding", "crowds", "crown", "crowned", "crucial", "cruel", "cruelty", "cruikshanks", "crushed", "cruz", "cry", "crying", "crystal", "ctesias", "cuba", "cuban", "cubans", "cubits", "cuff", "culminated", "culpable", "culprit", "culprits", "culture", "cumbered", "cumbrous", "cummings", "cunningham", "cup", "cupboard", "cupboards", "cupidity", "cups", "curb", "cure", "curiosities", "curiosity", "curious", "current", "curry", "curse", "curses", "cursing", "curtailment", "curtain", "curves", "custodians", "custody", "custom", "customary", "customers", "customs", "cut", "cutdowns", "cutlasses", "cuts", "cutting", "cuvier", "cuviers", "cylinder", "cyrus", "d", "dagger", "daily", "dallas", "damage", "damaged", "damages", "damascus", "damn", "damnatory", "damp", "dance", "danger", "dangerous", "dangers", "daniel", "danish", "dare", "daring", "darius", "dark", "darker", "darkness", "darwin", "dasent", "dasset", "dastardly", "data", "date", "dated", "dates", "dating", "daughter", "daughters", "daulby", "david", "davidson", "davis", "davison", "day", "daylight", "days", "daytime", "dazzling", "de", "dead", "deadly", "deal", "dealer", "dealers", "dealey", "dealing", "dealings", "deals", "dealt", "death", "deaths", "debarred", "debasing", "debate", "debated", "debates", "debauched", "debauchery", "debt", "debtor", "debtors", "debts", "debutant", "decade", "decades", "decapitation", "decaying", "deceased", "deceives", "december", "decency", "decent", "deception", "decide", "decided", "deciding", "decision", "decisions", "decisively", "deck", "declaration", "declare", "declared", "declaring", "decline", "declined", "decomposition", "decorous", "decorum", "decreed", "dedicated", "deduced", "deducted", "deduction", "deed", "deeds", "deem", "deemed", "deep", "deeper", "deepest", "deeply", "deer", "defacing", "defalcations", "default", "defaulter", "defeated", "defeats", "defect", "defected", "defection", "defective", "defectively", "defectors", "defects", "defendants", "defended", "defense", "defenses", "defensive", "deferred", "defiance", "defiant", "deficiencies", "deficiency", "defied", "define", "defined", "defining", "definite", "definitely", "definition", "deformed", "defraud", "defrauding", "defy", "degenerated", "degradation", "degraded", "degree", "degrees", "dejection", "delano", "delarue", "delay", "delayed", "delays", "delectable", "delgado", "deliberate", "deliberately", "deliberation", "delicacy", "delicate", "delightful", "delinquency", "delinquent", "deliver", "delivered", "deliverer", "delivery", "delustered", "demand", "demanded", 
"demanding", "demands", "demarcation", "demeanor", "democracy", "democratic", "demonstrably", "demonstrate", "demonstrated", "demonstrates", "demonstrating", "demonstration", "demonstrations", "demonstrators", "demoralizing", "den", "denial", "denials", "denied", "denison", "denizens", "denoted", "denouncing", "dense", "densely", "denunciations", "deny", "denying", "depart", "departed", "department", "departments", "departure", "depend", "depended", "dependence", "dependent", "depends", "deplorable", "deployment", "deposed", "deposited", "depositors", "depository", "depraved", "deprecated", "deprecates", "depredators", "depression", "deprivation", "deprived", "depth", "depths", "deputy", "derision", "derive", "derived", "descend", "descended", "descent", "describe", "described", "describes", "describing", "description", "descriptions", "desert", "deserted", "deserve", "deserved", "deserving", "design", "designate", "designated", "designed", "designs", "desirable", "desire", "desired", "desiring", "desk", "desolate", "despair", "desperadoes", "desperate", "desperation", "despite", "despotism", "destined", "destiny", "destitute", "destroy", "destroyed", "destruction", "destructive", "detached", "detachment", "detail", "detailed", "details", "detain", "detained", "detect", "detected", "detection", "detective", "detectives", "detention", "deter", "deteriorated", "deterioration", "determination", "determine", "determined", "determining", "deterred", "deterrent", "detestable", "develop", "developed", "developing", "development", "developmental", "developments", "device", "devices", "devised", "devoid", "devote", "devoted", "devoting", "devotion", "devotional", "dexterous", "diabolical", "diagnosis", "dials", "diameter", "diamonds", "diapers", "diary", "dice", "dickens", "dictator", "did", "didnt", "didot", "die", "died", "diem", "diet", "dietaries", "dietary", "differ", "differed", "difference", "differences", "different", "differently", "differs", "difficult", "difficulties", "difficulty", "diffuse", "diffusion", "digested", "digestion", "digestive", "digging", "dignitaries", "dignity", "digression", "diligently", "dillon", "dimensions", "diminish", "diminished", "diminishing", "diminution", "dinas", "dining", "dinner", "dint", "dioxide", "direct", "directed", "directing", "direction", "directions", "directly", "director", "directors", "dirt", "dirty", "disability", "disagree", "disagreeable", "disappear", "disappearance", "disappeared", "disappointed", "disapproval", "disapproved", "disapproving", "disassemble", "disassembled", "disastrous", "disbeliever", "disbursed", "discarded", "discharge", "discharged", "disciplinary", "discipline", "disclosed", "disclosure", "disclosures", "discomfort", "discontent", "discontented", "discontinuance", "discontinued", "discordant", "discount", "discounted", "discourage", "discouragements", "discouraging", "discourse", "discourses", "discover", "discovered", "discovering", "discovery", "discreditable", "discretion", "discrimination", "discuss", "discussed", "discussion", "discussions", "disease", "disenchanted", "disfigured", "disgrace", "disgraced", "disgraceful", "disgraces", "disguise", "disguised", "disgusted", "disgusting", "disheartening", "dishonest", "dishonesty", "disinclination", "disinterested", "dislike", "dismay", "dismembered", "dismemberment", "dismissed", "disorder", "disordered", "disorderly", "disorders", "dispatch", "dispatched", "dispatcher", "dispensed", "dispensing", "disperses", "display", "displayed", "disporting", "disposal", 
"dispose", "disposed", "disposing", "disposition", "dispositions", "dispute", "disputes", "disregard", "disregarded", "disreputable", "dissatisfaction", "dissatisfied", "dissecting", "dissection", "dissections", "disseminating", "dissenters", "dissenting", "dissipate", "dissipation", "dissolute", "dissolved", "distance", "distances", "distant", "distemper", "distill", "distinct", "distinction", "distinctions", "distinctive", "distinctly", "distinguish", "distinguishable", "distinguished", "distinguishing", "distracted", "distracting", "distress", "distresses", "distressing", "distributed", "distributing", "distribution", "distributor", "district", "districts", "disturbance", "disturbances", "disturbed", "disturbing", "disuse", "ditch", "diverse", "diversified", "diversions", "diversity", "diverted", "divest", "divided", "dividend", "dividends", "divine", "division", "divisions", "divorce", "dixon", "do", "dobson", "dock", "docks", "doctor", "doctors", "doctrine", "document", "documents", "dodd", "doers", "does", "doesnt", "dog", "dogs", "doing", "dollar", "dollars", "dollimore", "domestic", "domination", "domingo", "dominoes", "don", "donation", "donations", "done", "donovan", "dont", "doom", "doomed", "door", "doors", "dose", "dot", "double", "doubled", "doubly", "doubt", "doubted", "doubtful", "doubtfulness", "doubtless", "doubts", "dough", "dougherty", "dover", "down", "downgrade", "downstairs", "downtown", "downward", "downwards", "dozen", "dr", "drafts", "dragged", "dragons", "drain", "dramatic", "dramatically", "draw", "drawer", "drawers", "drawing", "drawings", "drawn", "dread", "dreaded", "dreadful", "dreadfully", "dreams", "dreamy", "dress", "dressed", "dresser", "dresses", "dressing", "drew", "dried", "drill", "drink", "drinking", "drittal", "drive", "driven", "driver", "drivers", "driveway", "driving", "dromedary", "droops", "drop", "dropped", "drove", "drowning", "drs", "drugged", "drugstore", "drunk", "drunkenness", "drury", "dry", "dryad", "dublin", "duchess", "ducked", "ducking", "due", "dues", "dug", "duke", "dulany", "duly", "dumaine", "dumplings", "dundas", "dungeon", "dungeons", "dupe", "duplicate", "duplicates", "durability", "durance", "duranno", "durham", "during", "dusk", "dust", "dutch", "duties", "duty", "dwarfed", "dwelling", "dwelt", "dwyer", "dyeing", "dying", "e", "each", "eager", "eagerly", "ear", "earlene", "earlier", "earliest", "early", "earned", "earnest", "earnestly", "earnestness", "earning", "earnings", "ears", "earth", "ease", "easement", "easier", "easily", "east", "eastern", "easy", "eat", "eating", "ebullitions", "ecclesiastical", "economic", "economy", "edgar", "edge", "edges", "edgeware", "edgewise", "edifice", "edifying", "edinburgh", "edited", "edition", "editions", "editor", "editorials", "editors", "edmund", "edmunds", "educated", "educates", "education", "edward", "edwards", "edwin", "effect", "effected", "effective", "effectively", "effects", "effectual", "effectually", "efficacious", "efficiency", "efficient", "effluvia", "effort", "efforts", "effrontery", "effusion", "egg", "egress", "egypt", "eight", "eighteen", "eighteenth", "eighth", "eighths", "eighties", "eighty", "either", "ejected", "ekdahl", "eke", "eked", "eking", "el", "elaborate", "elapse", "elapsed", "elated", "elbow", "elder", "elderly", "elders", "elect", "elected", "election", "electric", "electrical", "elegance", "element", "elements", "elevated", "elevator", "elevators", "eleven", "eleventh", "eliminate", "eliminated", "eliminating", "elimination", "ellen", "elm", 
"eloquent", "else", "elsewhere", "eluded", "eluding", "elzevirs", "embankments", "embark", "embarked", "embarrassment", "embassy", "embellishment", "embezzlement", "embodied", "embodying", "embraced", "embracing", "embryology", "emerged", "emergencies", "emergency", "emigrate", "eminence", "emissaries", "emma", "emmanuel", "emoluments", "emotion", "emotional", "emotionally", "emotions", "emperor", "emphasis", "emphasize", "emphasized", "empire", "employ", "employed", "employee", "employees", "employer", "employers", "employment", "empowered", "empowering", "empty", "emptying", "emulated", "en", "enable", "enabled", "enacted", "enactment", "enactments", "encamped", "enclose", "enclosed", "enclosing", "enclosure", "encounter", "encountered", "encourage", "encouraged", "encouragement", "end", "endanger", "endangering", "endeavor", "endeavored", "endeavoring", "ended", "ending", "endorsed", "ends", "endure", "endured", "enduring", "enemies", "enemy", "energies", "energy", "enforce", "enforced", "enforcement", "engage", "engaged", "engendered", "engine", "england", "english", "engrafted", "engraved", "engraver", "engraving", "enhancing", "enjoy", "enjoyed", "enjoying", "enlarged", "enlargement", "enlightened", "enlightenment", "enlist", "enlisted", "enlivened", "ennui", "enormous", "enormously", "enough", "ensconced", "ensued", "ensuing", "entailed", "enter", "entered", "entering", "enterprise", "enterprises", "enters", "entertain", "entertained", "enthusiasm", "enthusiastic", "enthusiasts", "enticed", "entire", "entirely", "entirety", "entitled", "entrance", "entrances", "entrapped", "entrapper", "entreaties", "entries", "entrust", "entrusted", "entry", "enumerated", "enunciated", "enunciation", "envelope", "enviable", "environment", "enzyme", "epidemic", "epidermis", "episcopal", "episode", "epitomized", "epoch", "equal", "equally", "equipment", "equipped", "equivalent", "erased", "ere", "erect", "erected", "erecting", "erection", "erred", "error", "ervay", "esagil", "esarhaddon", "escape", "escaped", "escapes", "escort", "escorted", "escorting", "escritoire", "especial", "especially", "espoused", "esprit", "esq", "essence", "essential", "essentially", "essex", "establish", "established", "establishes", "establishing", "establishment", "establishments", "estate", "estates", "esther", "estimate", "estimated", "estimates", "et", "etc", "eternal", "eternity", "euins", "eulogized", "euphemistically", "euphrates", "europe", "european", "evaded", "evaluate", "evaluated", "evaluating", "evaluation", "evans", "evaporation", "evasion", "evasive", "eve", "even", "evening", "evenly", "event", "events", "eventually", "ever", "every", "everybody", "everyday", "everyone", "everything", "everywhere", "evidence", "evidenced", "evidences", "evident", "evidently", "evil", "evils", "evince", "evinced", "evoked", "evolution", "evolved", "ex", "exact", "exacted", "exacting", "exaction", "exactions", "exactly", "exaggerated", "exaltation", "exalted", "examination", "examine", "examined", "examining", "example", "examples", "excavated", "exceeded", "exceedingly", "excellence", "excellent", "except", "exception", "exceptional", "exceptions", "excess", "excessive", "excessively", "exchange", "exchanged", "exchequer", "excite", "excited", "excitement", "exciting", "exclaiming", "exclamations", "exclude", "excluded", "exclusion", "exclusive", "exclusively", "excretion", "excretions", "excuse", "excuses", "execration", "execrations", "executed", "executing", "execution", "executioner", "executioners", "executions", 
"executive", "executor", "executors", "exempted", "exercise", "exercised", "exercises", "exercising", "exertion", "exertions", "exeter", "exhalation", "exhales", "exhaustion", "exhaustive", "exhibit", "exhibited", "exhibiting", "exhibition", "exhortation", "exhortations", "exiles", "exist", "existed", "existence", "existing", "exists", "exit", "exorbitant", "expand", "expect", "expectation", "expected", "expecting", "expedient", "expend", "expended", "expenditure", "expenditures", "expense", "expenses", "expensive", "experience", "experienced", "experiences", "experiencing", "experiment", "experimental", "experiments", "expert", "experts", "expired", "explain", "explained", "explaining", "explanation", "explicit", "exploiting", "exploits", "explored", "explosion", "explosive", "expose", "exposed", "exposes", "exposure", "exposures", "express", "expressed", "expresses", "expressing", "expression", "expressions", "expressly", "extant", "extend", "extended", "extending", "extension", "extensive", "extensively", "extent", "external", "extinct", "extinguished", "extorted", "extorting", "extortion", "extra", "extract", "extracted", "extracts", "extraordinary", "extravagance", "extravagant", "extreme", "extremely", "extremity", "exuberant", "eye", "eyes", "eyewitness", "eyewitnesses", "ezida", "f", "fabricate", "fabrication", "face", "faced", "faces", "facilitate", "facilitating", "facilities", "facility", "facing", "fact", "factitious", "factor", "factors", "factory", "facts", "faded", "faggots", "fail", "failed", "failing", "fails", "failure", "failures", "fain", "fains", "faint", "fainted", "faintest", "fainting", "fair", "fairest", "fairly", "faith", "faithful", "fall", "fallacious", "fallen", "falling", "falls", "false", "falsely", "fame", "familiar", "familiarity", "families", "family", "famine", "famous", "fancied", "fancier", "fantasy", "far", "fare", "farm", "farmer", "farmers", "farming", "farringdon", "farther", "fascist", "fashion", "fashioned", "fasson", "fast", "fastened", "fat", "fatal", "fate", "father", "fathers", "fats", "fault", "fauntleroy", "fauntleroys", "favor", "favorable", "favored", "favorite", "favorites", "favoritism", "fbi", "fbis", "fear", "feared", "fearful", "fearless", "feasibility", "feasible", "feasting", "feature", "features", "february", "fed", "federal", "fee", "feel", "feeling", "feelings", "feels", "fees", "feet", "feigning", "fell", "fellow", "fellows", "felon", "felonies", "felonious", "feloniously", "felons", "felony", "felt", "feltham", "female", "females", "femoral", "fence", "fenning", "ferocity", "ferrari", "ferrers", "fertile", "fervently", "festered", "festive", "fetched", "fettered", "fever", "few", "fewer", "fiber", "fibers", "fiction", "fictitious", "fide", "fidel", "fidelity", "field", "fields", "fiendish", "fiercely", "fifteen", "fifteenth", "fifth", "fifty", "fight", "fighting", "fights", "figure", "figured", "figures", "filaments", "filched", "file", "filed", "files", "fill", "filled", "films", "filthiness", "filthy", "fin", "final", "finally", "finance", "financial", "find", "finding", "findings", "finds", "fine", "fines", "finest", "finger", "fingerprint", "fingerprints", "fingers", "finish", "finished", "finisher", "finishing", "finishings", "fire", "firearms", "firecracker", "fired", "fireplace", "fireplaces", "firers", "fireside", "firing", "firm", "firmest", "firmly", "firmness", "firms", "first", "fiscal", "fischer", "fish", "fished", "fisher", "fishes", "fist", "fit", "fitness", "fits", "fitted", "fitting", "fitz", "fitzroy", "five", 
"fives", "fix", "fixed", "fixing", "flag", "flagitious", "flagrant", "flames", "flanking", "flash", "flattened", "flaw", "fled", "flee", "fleet", "fleeting", "flemish", "flesh", "fletcher", "flew", "flight", "flings", "flintshire", "flock", "flocked", "flooded", "floor", "flooring", "floors", "flour", "floured", "flow", "flower", "flowery", "flown", "fluctuated", "fluctuations", "flue", "fluid", "fluids", "fluted", "flux", "fly", "flying", "foal", "focus", "fold", "folded", "folkestone", "follow", "followed", "following", "follows", "fond", "fondly", "food", "foods", "fooled", "foolhardy", "foolish", "foolishness", "foot", "foothold", "footing", "footmen", "footsteps", "for", "forbear", "forbearance", "forbid", "forbidden", "force", "forced", "forces", "forcible", "forcibly", "forcing", "ford", "forde", "fore", "foregoing", "forehead", "foreign", "foreigners", "forerunners", "forest", "forethought", "forfeited", "forfeiting", "forged", "forger", "forgeries", "forgers", "forgery", "forget", "forging", "forgiveness", "forgot", "forgotten", "fork", "forks", "form", "formal", "formality", "formally", "formation", "formations", "formed", "former", "formerly", "forming", "forms", "forrest", "forrester", "forster", "fort", "forth", "forthcoming", "forthwith", "fortitude", "fortnight", "fortress", "fortunate", "fortunately", "fortune", "fortunes", "forty", "forward", "forwarded", "forwards", "fostered", "fostering", "fouad", "fought", "foul", "found", "foundation", "foundations", "founded", "founder", "founders", "founts", "four", "fourpence", "fourpenny", "fourteen", "fourteenth", "fourth", "foxen", "fpcc", "fracture", "fragment", "fragments", "frame", "framed", "framers", "frames", "framing", "france", "francis", "franklin", "frantic", "fraud", "frauds", "fraudulent", "frazier", "fraziers", "frederick", "free", "freed", "freedom", "freely", "freemen", "freer", "freeway", "freight", "french", "frenchman", "frenchmen", "frenchwoman", "frenzy", "frequency", "frequent", "frequented", "frequently", "fresh", "freshly", "freshness", "friday", "fried", "friend", "friendless", "friends", "friendships", "frightened", "frightful", "frise", "fritz", "fro", "frog", "from", "front", "fronts", "frowns", "fruit", "fruitful", "frustration", "fry", "frys", "fuel", "fugitive", "fugitives", "fulfilled", "fulfillment", "full", "fullest", "fully", "fumigated", "fumigating", "function", "functionary", "functioning", "functions", "fund", "fundamental", "fundamentally", "funds", "funeral", "fungi", "funny", "furies", "furious", "furlongs", "furnace", "furnish", "furnished", "furniture", "further", "furtherance", "furthered", "furthermore", "futile", "futility", "future", "g", "gaily", "gain", "gained", "gains", "gale", "galleries", "gallery", "gallon", "gallons", "gallows", "galvanized", "gamble", "gambler", "gambling", "game", "games", "gaming", "gamins", "gang", "gangs", "gape", "garage", "garb", "gardelle", "garden", "gardens", "garnish", "garret", "garter", "gas", "gaseous", "gases", "gasoline", "gate", "gates", "gatesman", "gatesmen", "gathered", "gathering", "gave", "gaze", "gazed", "gee", "gen", "general", "generally", "generals", "generate", "generation", "generations", "generosity", "generous", "generously", "gentleman", "gentlemanly", "gentlemans", "gentlemen", "gently", "genuine", "geographic", "george", "gerald", "gerard", "gering", "german", "germans", "germany", "germs", "get", "getting", "ghastly", "ghent", "gibbet", "gibbon", "gibes", "giddy", "giesecke", "gift", "gifts", "gigantic", "gills", "giltspur", 
"gin", "ginger", "girl", "girls", "give", "given", "givens", "gives", "giving", "glad", "gladly", "glance", "glanced", "glances", "glaring", "glass", "glasses", "glazed", "gleeson", "glimpse", "globe", "gloom", "gloomy", "glories", "glory", "gloucester", "glove", "glucose", "gluttonously", "go", "goaded", "goal", "goals", "gobrias", "god", "godmanchester", "gods", "goes", "going", "gold", "gone", "good", "goode", "goodness", "goods", "goodwill", "gordon", "goree", "goring", "gossip", "got", "gothic", "gotten", "govern", "governed", "governing", "government", "governmental", "governments", "governor", "governors", "gown", "gowrie", "grab", "grabbed", "grabbing", "grace", "gracious", "grade", "gradually", "graham", "grain", "grains", "grand", "grandchildren", "grant", "granted", "graphic", "grasps", "grass", "grated", "grating", "gratings", "gratitude", "gratuitous", "grave", "gravely", "graves", "gravest", "gray", "graze", "grease", "greased", "greaser", "great", "greater", "greatest", "greatly", "greatness", "greed", "greek", "greeks", "green", "greenacre", "greenacres", "greer", "greet", "greeted", "gregory", "grenades", "gretna", "grew", "grey", "greyhound", "grief", "grievance", "grievous", "griffith", "griffiths", "grimms", "grinding", "grip", "grist", "groan", "grog", "grooming", "grooves", "gross", "grossness", "grosvenor", "grotesque", "ground", "grounded", "grounds", "group", "groups", "grow", "growing", "grown", "grows", "growth", "grudge", "gruel", "guarantee", "guard", "guarded", "guarding", "guards", "guessed", "guests", "guidance", "guide", "guidelines", "guides", "guildford", "guileless", "guilt", "guilty", "guinea", "guineas", "guinyard", "gun", "gunman", "guns", "gunshot", "gunsmith", "gunther", "gurney", "gutenberg", "guy", "h", "habit", "habits", "habitual", "habitually", "hackney", "had", "hadnt", "haggerty", "hair", "hairy", "haled", "half", "halfpence", "halfpenny", "halfway", "halifax", "hall", "hallway", "halted", "halter", "halters", "halting", "hammer", "hammock", "hampers", "hampshire", "hampstead", "hams", "hand", "handbill", "handbills", "handbook", "handcuffed", "handed", "handhold", "handicapped", "handicrafts", "handing", "handiwork", "handkerchief", "handkerchiefs", "handle", "handled", "handling", "handmade", "handprinting", "hands", "handsome", "handspikes", "handwriting", "hanfield", "hang", "hanged", "hanging", "hangman", "hangmans", "hanover", "haphazard", "happen", "happened", "happens", "happily", "happiness", "happy", "harass", "harassed", "harboring", "hard", "hardened", "hardihood", "hardly", "hardwicke", "hare", "harkness", "harm", "harmless", "harmony", "harold", "harris", "harrowby", "harrowbys", "harry", "harsh", "hartogs", "harvest", "harvey", "harwood", "has", "haste", "hastened", "hastily", "hat", "hatched", "hate", "hated", "hatfield", "hatred", "hats", "hatters", "hatton", "have", "having", "hawkins", "hayes", "haynau", "hazard", "hazards", "he", "head", "headed", "heading", "headline", "headlong", "headquarters", "heads", "health", "healthful", "healthy", "heap", "hear", "heard", "hearing", "heart", "hearted", "hearts", "hearty", "heat", "heated", "heath", "heating", "heaved", "heaven", "heavier", "heaviest", "heavily", "heaving", "heavy", "hebrew", "heed", "heel", "height", "heinous", "held", "helen", "helicopter", "hell", "helmet", "help", "helped", "helpful", "helpless", "hemp", "hence", "henry", "her", "herald", "herbert", "herded", "here", "hereditary", "heredity", "hereford", "heres", "heretofore", "heritage", "hermetically", 
"herodotus", "herself", "hertford", "hesitate", "hesitated", "hesitation", "heterogeneous", "hibbert", "hibner", "hickey", "hicks", "hid", "hidden", "hideel", "hidell", "hideous", "hiding", "higgledy", "high", "higher", "highest", "highly", "highway", "highwayman", "highwaymen", "hill", "hillah", "him", "himself", "hind", "hinder", "hindered", "hindrance", "hindsight", "hinges", "hinted", "hire", "hired", "hiring", "his", "hissing", "historic", "histories", "history", "hit", "hitherto", "hits", "hitting", "hoare", "hoary", "hobart", "hocker", "hockers", "hogan", "hogshead", "hold", "holder", "holders", "holding", "hole", "holes", "hollow", "holloway", "holmes", "holster", "home", "homely", "homes", "homicidal", "homicide", "homologies", "homologous", "homology", "hon", "honest", "honestly", "honesty", "honor", "honorable", "honored", "hook", "hooked", "hoover", "hope", "hoped", "hopeless", "hopelessly", "hopes", "horizon", "horn", "horncastle", "horns", "horrible", "horrid", "horror", "horrors", "horse", "horseback", "horsemonger", "horses", "hospital", "hospitals", "host", "hostile", "hostility", "hosts", "hosty", "hostys", "hot", "hotel", "hour", "hours", "house", "housebreakers", "household", "housekeeper", "housemaid", "houses", "houston", "how", "howard", "howards", "however", "howlett", "howse", "hoxton", "huddled", "huge", "huggin", "hughes", "hulks", "human", "humane", "humanitarianism", "humanity", "humanly", "humble", "humiliation", "hump", "hundred", "hundreds", "hung", "hunt", "hunting", "hunton", "hurried", "hurriedly", "hurry", "hurt", "husband", "husbands", "hush", "huts", "huxley", "hydrogen", "hymn", "hymns", "hysterical", "hysterics", "i", "idea", "ideal", "ideals", "ideas", "identical", "identifiable", "identification", "identifications", "identified", "identify", "identifying", "identity", "ideological", "idle", "idleness", "idolaters", "if", "ignominy", "ignorance", "ignorant", "ignored", "ikey", "ilchester", "ill", "illegal", "illegibility", "illegitimate", "illiberal", "illinois", "illiterate", "illness", "illuminated", "illustrate", "illustration", "illustrations", "im", "image", "imaginable", "imaginary", "imagination", "imaginative", "imagined", "imbibed", "imgur", "imitated", "imitates", "imitating", "imitation", "imitators", "immature", "immediate", "immediately", "immense", "immigrants", "imminent", "immoral", "immorality", "immured", "impact", "impairment", "impartial", "imparting", "impassioned", "impatient", "impecunious", "impeded", "impediment", "impediments", "impeding", "impelled", "impending", "impenitent", "imperative", "imperfect", "imperfectly", "imperial", "imperiled", "imperious", "implements", "implicated", "implied", "implored", "importance", "important", "imported", "importer", "imposed", "imposing", "imposition", "impossibility", "impossible", "impounded", "impoverished", "imprecations", "impregnable", "impress", "impressed", "impression", "impressions", "impressive", "imprisoned", "imprisonment", "improper", "improperly", "improve", "improved", "improvement", "improvements", "improvident", "improving", "impudently", "impugned", "impulse", "imputation", "imputed", "in", "inability", "inadequacy", "inadequate", "inasmuch", "inauguration", "inc", "incapable", "incarcerated", "incarceration", "incentive", "incentives", "incessant", "inch", "inches", "incident", "incidentally", "incidents", "incited", "inclement", "inclination", "inclined", "include", "included", "includes", "including", "income", "incomers", "incommodious", "incomplete", 
"inconceivable", "inconceivably", "inconclusive", "inconsiderable", "inconsistent", "inconvenience", "inconvenient", "incorporated", "incorrect", "incorrigible", "increase", "increased", "increasing", "increasingly", "incredible", "incriminating", "incumbent", "incurred", "indecision", "indeed", "indefatigable", "indefinitely", "independence", "independent", "independently", "indescribable", "index", "indexed", "indian", "indicate", "indicated", "indicates", "indicating", "indication", "indications", "indicted", "indictment", "indifference", "indifferent", "indignantly", "indignation", "indirect", "indirectly", "indiscriminate", "indiscriminately", "indispensable", "indispensably", "indisputable", "indistinguishable", "individual", "individuality", "individuals", "induce", "induced", "inducements", "indulge", "indulged", "indulgence", "industrial", "industries", "industrious", "industry", "inefficiency", "inefficient", "inequalities", "inevitable", "inevitably", "inexperienced", "inexpressible", "infant", "infantry", "infants", "infectious", "inference", "inferior", "infernal", "inferred", "infinite", "infirm", "infirmaries", "infirmary", "infirmity", "inflamed", "inflammation", "inflated", "inflict", "inflicted", "inflicting", "infliction", "influence", "influenced", "influences", "influential", "influx", "informal", "information", "informed", "informer", "infuse", "ingenious", "ingenuity", "ingest", "ings", "inhabitants", "inherent", "inherited", "inhuman", "iniquitous", "initial", "initials", "initiated", "initiative", "injure", "injured", "injuries", "injurious", "injury", "injustice", "injustices", "ink", "inmate", "inmates", "inn", "inner", "innocence", "innocent", "innovation", "innumerable", "inordinately", "inorganic", "inquest", "inquire", "inquired", "inquiries", "inquiry", "inquisitive", "insane", "insanity", "inscribed", "inscription", "inscriptions", "insect", "insecure", "insecurity", "insensible", "inseparable", "insert", "inserted", "inside", "insidious", "insight", "insignificant", "insist", "insisted", "insistence", "insolent", "insolvent", "inspect", "inspected", "inspection", "inspections", "inspector", "inspectors", "inspired", "instability", "instance", "instances", "instant", "instantaneous", "instantly", "instead", "instigation", "instigator", "instituted", "institution", "institutions", "instruct", "instructed", "instruction", "instructions", "instructor", "instructors", "instrument", "instrumentality", "instruments", "insufficiency", "insufficient", "insulted", "insulting", "insuperable", "insurance", "insurances", "insure", "insured", "intact", "integrity", "intellect", "intelligence", "intelligent", "intemperance", "intemperate", "intend", "intended", "intense", "intensified", "intensify", "intensive", "intent", "intention", "intentions", "interagency", "interceded", "intercept", "intercourse", "interest", "interested", "interesting", "interests", "interfere", "interfered", "interference", "interfering", "interim", "interior", "interment", "intermingled", "intermittently", "intermix", "internal", "international", "interpretation", "interpreted", "interpreter", "interrogated", "interrogation", "interruption", "intersection", "interstate", "interval", "intervals", "intervening", "intervention", "interview", "interviewed", "interviews", "intimacy", "intimate", "intimated", "intimately", "intimation", "into", "intolerant", "intoxicated", "intoxicating", "intoxication", "intricate", "introduce", "introduced", "introducing", "introduction", "intrusted", "invariable", 
"invariably", "inveigled", "inveigling", "invented", "invention", "inverse", "invest", "invested", "investigate", "investigated", "investigating", "investigation", "investigations", "investigative", "investigators", "investigatory", "investment", "investor", "investors", "invitation", "invitations", "invited", "invoice", "involve", "involved", "involvement", "involves", "involving", "inward", "inwards", "ionic", "ipswich", "irksome", "iron", "ironed", "ironing", "irons", "irrational", "irregularities", "irregularity", "irremediable", "irresistible", "irresistibly", "irrespective", "irresponsible", "irrevocable", "irrevocably", "irritable", "irritated", "irving", "is", "islands", "islington", "isolated", "isolation", "issue", "issued", "issues", "isthe", "it", "italian", "italy", "item", "items", "itinerant", "itinerary", "its", "itself", "ive", "j", "jack", "jacket", "jackson", "jacobite", "jacobus", "jacques", "jail", "jailer", "jails", "james", "jamess", "jane", "january", "japan", "jarman", "jarred", "jarrow", "jaws", "jealous", "jeanette", "jeanne", "jebb", "jeered", "jeers", "jefferson", "jem", "jenkins", "jenson", "jensons", "jeopardized", "jeremy", "jerking", "jerome", "jersey", "jerusalem", "jesse", "jests", "jew", "jewel", "jeweler", "jewelers", "jewelery", "jewels", "jews", "job", "jobs", "john", "johnny", "johnson", "johnsons", "join", "joined", "joining", "joint", "jointed", "joints", "joke", "jollity", "jones", "jordan", "joseph", "joshua", "journal", "journalists", "journals", "journey", "journeys", "jowl", "joy", "jr", "judge", "judged", "judges", "judgment", "judice", "judicial", "judiciary", "judicious", "juices", "julian", "july", "jumbled", "jump", "jumped", "june", "junior", "jupiter", "juries", "jurisdiction", "jurisdictions", "jurisprudence", "jury", "just", "justice", "justices", "justification", "justified", "justify", "justinian", "justly", "juvenile", "juveniles", "kaiser", "kate", "katherine", "kay", "kean", "keen", "keenest", "keenness", "keep", "keeper", "keepers", "keeping", "keeps", "keith", "kellerman", "kelly", "kendal", "kennedy", "kennedys", "kennel", "kenneth", "kennington", "kent", "kept", "ker", "kerp", "kers", "ketch", "ketchs", "key", "keys", "khrushchev", "kicked", "kidderminster", "kilburn", "kill", "killed", "killer", "killing", "kind", "kindled", "kindly", "kindness", "kinds", "kinetic", "king", "kingdom", "kingdoms", "kings", "kingston", "kitchen", "kleins", "knack", "knead", "knee", "kneel", "kneeling", "knees", "knell", "knelt", "knew", "knife", "knight", "knives", "knock", "knocked", "knots", "knotted", "know", "knowing", "knowledge", "known", "knows", "krapps", "l", "labor", "laboratory", "labored", "laborers", "lace", "lack", "lad", "ladder", "ladies", "lading", "lads", "lady", "ladys", "lagged", "laid", "lain", "lake", "lamar", "lamb", "lambeth", "lambs", "lamentable", "lamentation", "lancaster", "land", "landed", "landing", "landlady", "lands", "lane", "language", "languid", "languished", "lap", "lapels", "lapse", "lapses", "lapsing", "large", "largely", "larger", "largest", "last", "lasted", "lasts", "late", "lately", "latent", "later", "lateral", "laterally", "latest", "lath", "latin", "latona", "latonas", "latter", "latterly", "latters", "laudable", "laugh", "laughed", "laughing", "laughter", "launched", "laundry", "lavender", "laverstock", "lavish", "law", "lawful", "lawn", "lawrence", "laws", "lawson", "lawsons", "lawyers", "lax", "laxity", "lay", "layer", "layers", "laying", "le", "leach", "lead", "leaden", "leader", "leaders", 
"leadership", "leading", "leads", "leaf", "leaned", "leant", "leap", "learn", "learned", "learning", "learnt", "lears", "least", "leather", "leave", "leaves", "leaving", "lectures", "led", "lee", "leeds", "lees", "leew", "left", "leg", "legacies", "legacy", "legal", "legend", "legends", "legged", "legibility", "legible", "legislating", "legislation", "legislative", "legislature", "legitimate", "legs", "leicester", "length", "lengthened", "lengths", "lengthy", "leniency", "lennie", "leon", "lerigo", "less", "lessened", "lesser", "lesson", "lessons", "lest", "let", "lethal", "lets", "letter", "letterpress", "letters", "letting", "level", "leveled", "lever", "levied", "levity", "levy", "levying", "lewdness", "lewis", "liabilities", "liable", "liaison", "libels", "liberal", "liberate", "liberties", "liberty", "libitum", "librivox", "lie", "lied", "lies", "lieutenancy", "lieutenant", "life", "lift", "lifted", "lifting", "lifts", "light", "lighted", "lighter", "lighting", "lightly", "lights", "like", "liked", "likelihood", "likely", "likeness", "likewise", "limbo", "limbs", "lime", "limit", "limitation", "limited", "limiting", "limits", "limousine", "lincoln", "lincolnshire", "line", "lineal", "lined", "linen", "lines", "lineup", "lineups", "lingered", "lingering", "lining", "link", "linked", "linking", "links", "linnie", "lion", "lions", "lip", "liquid", "liquids", "liquor", "liquorpond", "liquors", "list", "listed", "listen", "listened", "listening", "listing", "literally", "literary", "literature", "little", "live", "lived", "livelihood", "lively", "liver", "liveries", "liverpool", "lives", "living", "load", "loaded", "loading", "loads", "loaf", "loans", "loathsome", "loaves", "lobby", "local", "localities", "locality", "locally", "locate", "located", "location", "lock", "locked", "locking", "locomotion", "locomotive", "lodge", "lodged", "lodger", "lodging", "lodgings", "loft", "lofty", "log", "london", "lonely", "long", "longer", "longest", "longman", "look", "looked", "looking", "looks", "loomed", "looms", "loop", "loops", "loose", "loosen", "loosened", "lopez", "lord", "lords", "lordship", "lordships", "los", "lose", "loses", "losing", "loss", "lost", "lot", "loud", "louder", "loudon", "louisiana", "love", "lovelady", "loving", "low", "lowe", "lower", "lowered", "lowest", "lowing", "loyal", "loyalty", "lt", "lubeck", "luck", "luckless", "lucky", "ludgate", "ludicrously", "luigi", "lukewarm", "lump", "lumped", "lunacy", "lunatic", "lunatics", "lunch", "luncheon", "lunchroom", "lundy", "lung", "lungs", "lurched", "luxuries", "luxurious", "luxury", "lying", "lymph", "lyndon", "lyons", "m", "machine", "machinery", "machines", "mackay", "mackintosh", "mad", "madame", "maddened", "made", "madness", "mae", "magazines", "magic", "magistracy", "magistrate", "magistrates", "magnates", "magnified", "magnifying", "magnitude", "maid", "maiden", "maidservant", "maidstone", "mail", "mailbox", "mailing", "mailroom", "maimed", "maiming", "main", "mainly", "maintain", "maintained", "maintaining", "maintains", "maintenance", "maintz", "maison", "maj", "majesty", "majestys", "major", "majority", "make", "makes", "making", "malady", "malcolm", "male", "malefactors", "males", "maliciously", "malignant", "mall", "malpractices", "maltby", "mammal", "man", "manacled", "manacling", "manage", "manageable", "managed", "management", "manager", "managers", "manchester", "mandate", "mandella", "manifest", "manifestation", "manifestations", "manifestly", "manifold", "manipulation", "manned", "manner", "mannered", 
"manners", "manning", "mannings", "mannlicher", "manor", "manpower", "mans", "mansion", "manslaughter", "manual", "manufactory", "manufacture", "manufactured", "manufacturers", "manufactures", "manumitted", "many", "map", "march", "marched", "marching", "marduk", "margaret", "margin", "marguerite", "marguerites", "marie", "marina", "marine", "marines", "marion", "mark", "marked", "market", "markets", "markham", "markhams", "marks", "marksman", "marksmanship", "marksmen", "marley", "marquis", "marriage", "married", "marrs", "marry", "marrying", "mars", "marsalis", "marsh", "marshal", "marshals", "marshalsea", "marsolino", "mart", "martha", "martial", "martin", "marvelous", "marwood", "marxism", "marxist", "mary", "marylebone", "mask", "masonry", "masons", "mass", "massacre", "massage", "masses", "massive", "master", "masters", "mat", "matched", "matches", "mate", "material", "materially", "materials", "mates", "matrices", "matron", "mats", "matter", "mattered", "matters", "matthew", "matting", "mattress", "mature", "maturity", "maudlin", "maximum", "may", "maybe", "maynard", "mayor", "mcbride", "mcclelland", "mcdonald", "mckinley", "mcwatters", "me", "meager", "meal", "meals", "mean", "meandering", "meaning", "meaningful", "meaningless", "means", "meant", "meanwhile", "measure", "measured", "measures", "measuring", "meat", "mechanism", "mediation", "medical", "medically", "medicine", "meditation", "meditations", "mediterranean", "meek", "meet", "meeting", "meetings", "melancholy", "mell", "melted", "member", "members", "membership", "memorable", "memoranda", "memorandum", "memorial", "memorials", "memory", "men", "menaced", "menial", "mens", "mental", "mentelin", "mention", "mentioned", "mentioning", "mentions", "merchant", "merchants", "mercies", "merciful", "mercifully", "mercy", "mere", "merely", "merionethshire", "merited", "meritorious", "merits", "merrily", "merriment", "message", "messenger", "messengers", "messrs", "met", "metabolism", "metal", "metallic", "method", "methods", "metropolis", "metropolitan", "mexico", "michael", "michaelis", "michaelmas", "microscope", "microscopic", "middle", "middlesex", "midnight", "midst", "midway", "midweek", "might", "mighty", "milan", "mild", "mile", "miles", "militant", "military", "militia", "milk", "mill", "millbank", "miller", "millimeter", "million", "millions", "mills", "millstones", "milton", "mince", "mind", "minded", "minds", "mine", "mineral", "mingled", "minimize", "minimum", "minister", "ministers", "ministration", "ministrations", "minor", "minsk", "mint", "minute", "minutely", "minutes", "minutest", "minver", "misapplication", "misapprehension", "misappropriated", "misappropriating", "miscellaneous", "mischief", "misconduct", "miscreant", "misdeeds", "misdemeanant", "misdemeanants", "misdemeanors", "misdirected", "misemployment", "miserable", "miserably", "misery", "mishap", "mismanagement", "misplaced", "misrepresentations", "miss", "missal", "missals", "missed", "misseurs", "missile", "missiles", "missing", "mission", "misspelling", "mistake", "mistaken", "mistress", "misunderstood", "misuse", "mitigate", "mitigating", "mitigation", "mix", "mixed", "mixing", "moat", "mob", "mobbs", "mobility", "mockery", "mode", "model", "moderate", "modern", "modes", "modification", "modifications", "modified", "mohrenschildt", "moisture", "molasses", "molded", "moment", "moments", "monastery", "monday", "monetary", "money", "monger", "monies", "monitor", "monopolize", "monopolized", "monopoly", "monotonous", "monsters", "montgomery", "month", 
"months", "moon", "moonshine", "moorfields", "mop", "mops", "moral", "morally", "morals", "morbidly", "more", "moreover", "morning", "mornings", "morpeth", "morphology", "mortally", "mortals", "mortar", "mortem", "mortgages", "moscow", "moses", "moss", "most", "mostly", "mother", "mothers", "motion", "motionless", "motivated", "motivation", "motive", "motives", "motorcade", "motorcycle", "motorcycles", "mould", "mound", "mounds", "mount", "mountain", "mountainous", "mounted", "mounting", "mouth", "mouths", "movable", "move", "moved", "movement", "movements", "mover", "moves", "moving", "mr", "mrs", "mss", "much", "mud", "mule", "mules", "mullay", "muller", "multiple", "multiplied", "multitude", "municipal", "murder", "murdered", "murderer", "murderers", "murderess", "murdering", "murderous", "murders", "murmur", "murphy", "murret", "muscle", "muscles", "muscular", "museum", "museums", "must", "mustered", "mute", "mutilated", "mutineers", "mutter", "muttering", "mutual", "mutually", "muzzle", "mwddy", "my", "myself", "mysterious", "mystery", "n", "nabonidus", "nabopolassar", "nagging", "nails", "naked", "nakedness", "name", "named", "namely", "names", "narrative", "narrow", "narrowed", "narrowing", "narrowly", "nasty", "nation", "national", "nations", "native", "natives", "natural", "naturally", "nature", "nauseous", "naval", "navigate", "navy", "ne", "near", "nearby", "nearer", "nearest", "nearly", "neat", "neatly", "nebuchadnezzar", "nebuchadnezzars", "necessaries", "necessarily", "necessary", "necessities", "necessity", "neches", "neck", "necks", "need", "needed", "needing", "needlessly", "needs", "needy", "neely", "nefarious", "negative", "negatively", "negatives", "neglect", "neglected", "negotiate", "negotiating", "neighbor", "neighborhood", "neighboring", "neighbors", "neild", "neilds", "neither", "nerve", "nerves", "nervous", "nest", "net", "netherlands", "netting", "network", "neurological", "never", "nevertheless", "new", "newest", "newgate", "newly", "newman", "newmans", "news", "newsman", "newspaper", "newspapers", "next", "nice", "nicholas", "nicholson", "nickname", "nicol", "niggardliness", "night", "nights", "nimitti", "nine", "nineteen", "nineteenth", "ninety", "nineveh", "ninth", "nitrates", "nitrogen", "nixon", "no", "noble", "nobody", "nocturnal", "nodded", "noise", "noisy", "nominally", "nominated", "non", "noncommissioned", "none", "nonexistent", "nonsense", "noon", "noose", "nor", "norfolk", "normal", "normally", "norman", "north", "northeast", "northern", "northwest", "norwich", "nose", "noses", "not", "notable", "notably", "note", "notebook", "noted", "notes", "nothing", "notice", "noticeable", "noticed", "notification", "notified", "notify", "notion", "notorieties", "notoriety", "notorious", "notoriously", "notwithstanding", "nought", "nourished", "nova", "novel", "novels", "novelty", "november", "novo", "now", "nowhere", "noxious", "noyes", "number", "numbered", "numberless", "numbers", "numerals", "numerous", "nurse", "nursery", "nutriment", "nutrition", "nutritive", "o", "oak", "oakum", "oath", "oaths", "obey", "obeyed", "object", "objected", "objection", "objectionable", "objections", "objective", "objects", "obligation", "obliged", "obliterated", "obliteration", "obscene", "obscenity", "obscure", "obscured", "observable", "observance", "observation", "observations", "observe", "observed", "observer", "observers", "observes", "observing", "obsolete", "obstacles", "obtain", "obtained", "obtaining", "obtains", "obvious", "obviously", "occasion", "occasional", 
"occasionally", "occasioned", "occasions", "occupant", "occupants", "occupation", "occupations", "occupied", "occupy", "occupying", "occur", "occurred", "occurrences", "occurs", "ocean", "oclock", "oconnor", "oconnors", "october", "odd", "oddity", "odell", "odonnell", "of", "off", "offender", "offenders", "offense", "offenses", "offensive", "offer", "offered", "offering", "offers", "office", "officer", "officers", "offices", "official", "officially", "officials", "often", "oftener", "oh", "oil", "oiled", "old", "older", "oldest", "olympic", "omally", "omissions", "omit", "omitted", "on", "once", "one", "ones", "only", "onto", "ontogeny", "onus", "onwards", "opaque", "open", "opened", "opening", "openly", "operate", "operated", "operates", "operating", "operation", "operations", "operator", "opinion", "opinions", "opponents", "opportunities", "opportunity", "oppose", "opposed", "opposing", "opposite", "opposition", "oppression", "oppressive", "or", "oral", "orange", "oranges", "ordeal", "order", "ordered", "ordering", "orderly", "orders", "ordinarily", "ordinary", "ordinarys", "org", "organ", "organic", "organism", "organisms", "organization", "organizations", "organize", "organized", "organs", "oriental", "origin", "original", "originally", "originated", "orleans", "ornament", "ornamentation", "ornamented", "orphans", "orthodox", "osborne", "osmosis", "ostensibly", "oswald", "oswalds", "other", "others", "otherwise", "ought", "ounces", "our", "ours", "ourselves", "out", "outbreak", "outdone", "outer", "outfitted", "outlay", "outlet", "outlined", "outrage", "outraged", "outrageous", "outrages", "outright", "outset", "outside", "outskirts", "outspoken", "outstanding", "oven", "over", "overall", "overbearing", "overboard", "overcome", "overcrowded", "overcrowding", "overdo", "overend", "overflowed", "overflowing", "overhaul", "overhead", "overleap", "overlooked", "overlooking", "overpass", "overpasses", "overseers", "oversight", "overtake", "overthrow", "overthrown", "overtones", "overtook", "overwhelming", "owen", "owing", "own", "owned", "owner", "owners", "ownership", "owning", "ox", "oxenford", "oxford", "oxfords", "oxidation", "oxon", "oxygen", "oxygenation", "p", "pace", "pacing", "package", "packages", "packed", "packing", "page", "pages", "paget", "paid", "pail", "pain", "paine", "paines", "painful", "pains", "painted", "painters", "pair", "pal", "palace", "palaces", "pale", "paleness", "palliation", "palm", "palmer", "palmers", "palmprint", "palmprints", "paltry", "pampering", "pamphlet", "pan", "panels", "panes", "pangs", "panic", "pannartz", "pans", "pantaloons", "pantry", "paper", "papers", "parade", "paraded", "parades", "paraffin", "paragraph", "parallel", "paralyzed", "paramount", "paramours", "paranoid", "parcel", "parceled", "parcels", "pardon", "pardoned", "parentage", "parents", "paris", "parish", "park", "parked", "parking", "parkland", "parliament", "parliamentary", "parma", "part", "partial", "partially", "participate", "participated", "particular", "particularly", "particulars", "parties", "partitioned", "partly", "partners", "partook", "partridge", "parts", "party", "paso", "pass", "passage", "passages", "passbook", "passed", "passenger", "passengers", "passes", "passing", "passion", "passionate", "passions", "passive", "passport", "past", "paste", "pasties", "patch", "pate", "path", "pathological", "patient", "patiently", "patrick", "patriotic", "patriotism", "patriots", "patrol", "patrolled", "patrolman", "patrolmen", "patron", "patronized", "pattern", "patterson", 
"patton", "paul", "pause", "pauses", "pawnbrokers", "pay", "paying", "payment", "payments", "pea", "peace", "peaceably", "peaceful", "peacefully", "pear", "pearson", "peculiar", "peculiarities", "pecuniary", "peel", "peer", "peered", "peers", "pegsworth", "pell", "pembroke", "penal", "penalties", "penalty", "pence", "pending", "penitent", "penitentiary", "penman", "pennant", "pennsylvania", "penny", "pennyworth", "pens", "pension", "pensions", "pentonville", "penultimate", "penury", "people", "peoples", "pepper", "per", "percent", "perception", "percival", "peremptorily", "peremptory", "perennibranch", "perennibranchs", "perfect", "perfection", "perfectly", "perforations", "perform", "performance", "performed", "performing", "perfunctory", "perhaps", "perilous", "period", "periodic", "periodical", "periodically", "periods", "peripheral", "perish", "perished", "perjury", "permanent", "permanently", "permissible", "permission", "permissive", "permit", "permitted", "pernicious", "perpetrated", "perpetration", "perpetrator", "perpetrators", "perpetual", "perpetually", "perpetuated", "perpetuation", "perquisite", "perry", "persecuted", "persecution", "perseverance", "persevered", "persia", "persian", "persians", "persistent", "persistently", "person", "personage", "personages", "personal", "personalities", "personality", "personally", "personated", "personnel", "persons", "perspective", "persuaded", "persuasion", "persuasive", "pertinently", "peter", "peters", "petition", "petitioned", "petitioner", "petitions", "petrified", "petticoats", "petty", "petworth", "pew", "phase", "phases", "phenomena", "phial", "philanthropic", "philanthropist", "philanthropists", "philanthropy", "phillips", "phipoe", "phoebe", "phone", "photograph", "photographed", "photographer", "photographers", "photographic", "photographs", "photography", "phrase", "physical", "physically", "physician", "physiology", "pic", "pick", "picked", "pickets", "picking", "pickup", "pics", "picture", "pictures", "pie", "piece", "pieces", "pieman", "piemen", "pierce", "piercingly", "pies", "piety", "piggledy", "pilasters", "pile", "piled", "pilgrim", "pillage", "pillory", "pillow", "pills", "pilot", "pimlico", "pin", "pinched", "pinfold", "pinioned", "pinioning", "pint", "pious", "pipe", "piper", "pipes", "pirates", "pistol", "pistols", "pit", "pitch", "pitchforked", "pitiable", "pittance", "pity", "placards", "place", "placed", "placement", "places", "placing", "plain", "plainly", "plaintiffs", "plan", "plane", "plank", "planks", "planned", "planning", "plans", "plant", "planted", "plants", "plasterers", "plastic", "plate", "plateau", "plates", "platform", "plausible", "play", "played", "players", "playing", "plays", "plaza", "plea", "plead", "pleaded", "pleadings", "pleas", "pleasant", "pleasanter", "please", "pleased", "pleasure", "pleasures", "pledged", "plentiful", "plight", "plot", "plotting", "plowed", "plunged", "plus", "pocket", "pocketbook", "pocketed", "pockets", "poe", "poetical", "poetry", "point", "pointed", "pointing", "points", "poison", "poisoned", "poisoner", "poisoning", "poisons", "poker", "police", "policeman", "policemen", "policies", "policy", "political", "politician", "polluted", "pompous", "pond", "pool", "poor", "poorer", "populace", "popular", "population", "populous", "porch", "pork", "port", "porter", "porters", "portion", "portions", "portland", "portrait", "posed", "position", "positions", "positive", "positively", "possess", "possessed", "possessing", "possession", "possessions", "possessor", 
"possibilities", "possibility", "possible", "possibly", "post", "postage", "postal", "posted", "postilions", "posting", "postponed", "posts", "pot", "potato", "potatoes", "potential", "potentially", "potman", "poultry", "pound", "pounding", "pounds", "pour", "poverty", "powder", "power", "powerful", "powerless", "powers", "powerscourt", "practical", "practically", "practice", "practiced", "practices", "practicing", "practitioner", "practitioners", "praecipe", "praise", "praises", "praiseworthy", "pray", "prayer", "prayers", "praying", "preached", "preacher", "preachers", "preaches", "preaching", "preamble", "precarious", "precautions", "preceded", "precedence", "precedent", "preceding", "precinct", "precincts", "precious", "precipitancy", "precise", "precisely", "precision", "preclude", "predecessor", "predecessors", "prefer", "preference", "preferred", "preferring", "prefers", "prejudice", "preliminary", "premeditated", "premeditation", "premises", "premising", "preparation", "preparations", "preparatory", "prepare", "prepared", "preparing", "preponderance", "prerogative", "prescribed", "prescription", "presence", "present", "presentation", "presented", "presenting", "presently", "presentment", "presentments", "presents", "preservation", "preserve", "preserved", "preserver", "presided", "presidency", "president", "presidential", "presidents", "press", "pressed", "presses", "pressing", "pressure", "presumably", "presumed", "pretended", "pretense", "pretension", "pretext", "pretty", "prevail", "prevailed", "prevailing", "prevalent", "prevent", "prevented", "preventing", "preventive", "previous", "previously", "prey", "preying", "price", "prices", "pricking", "pride", "pries", "priests", "primarily", "primary", "prime", "primer", "prince", "principal", "principally", "principals", "principle", "principles", "print", "printed", "printer", "printers", "printing", "prints", "prior", "priority", "priory", "prison", "prisoner", "prisoners", "prisons", "privacy", "private", "privately", "privation", "privilege", "privileged", "privileges", "privy", "prize", "pro", "probabilities", "probability", "probable", "probably", "probation", "probative", "probert", "proberts", "problem", "problems", "procedure", "procedures", "proceed", "proceeded", "proceeding", "proceedings", "proceeds", "process", "processed", "processes", "processing", "procession", "proclaimed", "proclivities", "procrastination", "proctors", "procurable", "procure", "procured", "produce", "produced", "produces", "product", "production", "productions", "productive", "profane", "professed", "professes", "profession", "professional", "profit", "profitable", "profits", "profligacy", "profound", "profoundest", "profoundly", "profusely", "program", "programs", "progress", "prohibited", "prohibitory", "project", "projected", "projecting", "projections", "projects", "prolong", "prolonged", "prominence", "prominent", "prominently", "promiscuous", "promise", "promised", "promising", "promissory", "promote", "promoted", "promoters", "prompt", "prompted", "promptly", "promulgation", "prongs", "pronounced", "proof", "proofs", "propaganda", "propagated", "propensities", "propensity", "proper", "properly", "property", "proportion", "proportionately", "proportions", "proposal", "proposals", "propose", "proposed", "proposition", "propositions", "prosecuted", "prosecution", "prospect", "prospective", "prosper", "prospered", "prosperity", "prosperous", "prosperously", "prostitutes", "protect", "protected", "protecting", "protection", "protective", 
"protector", "proteid", "proteids", "protein", "protest", "protested", "protests", "protoplasm", "protracted", "proud", "prove", "proved", "proves", "provide", "provided", "providentially", "provides", "providing", "province", "provinces", "provincial", "provision", "provisional", "provisions", "proviso", "prs", "prudery", "psalters", "psychiatric", "psychiatrist", "psychological", "public", "publican", "publication", "publications", "publicity", "publicized", "publicly", "published", "publishers", "publishing", "puerto", "pull", "pulled", "pulling", "pulpit", "pulse", "pump", "pumps", "punched", "punches", "punctiliously", "punctually", "pungent", "punish", "punishable", "punished", "punishment", "punishments", "puppy", "purchase", "purchased", "purchases", "purchasing", "pure", "purely", "purging", "purity", "purlieus", "purported", "purporting", "purpose", "purposely", "purposes", "pursuance", "pursue", "pursued", "pursuing", "pursuit", "purveyors", "pushed", "puss", "put", "putrid", "putting", "python", "quadrangle", "quaker", "qualification", "qualifications", "qualified", "qualifying", "qualities", "quality", "quantities", "quantity", "quarrel", "quarreling", "quarrels", "quart", "quarter", "quarterly", "quarters", "queen", "queens", "quelled", "queries", "question", "questioned", "questioning", "questions", "quick", "quickening", "quicklime", "quickly", "quid", "quiet", "quieted", "quietly", "quigley", "quigleys", "quills", "quit", "quite", "quote", "quoted", "quotes", "r", "rabbit", "race", "raced", "rackets", "radiated", "radiation", "radical", "radically", "radio", "radioed", "raff", "rage", "ragged", "rags", "railing", "railroad", "railroads", "railway", "rain", "raise", "raised", "raising", "raked", "ralph", "rambler", "rambling", "ramifications", "rampart", "ran", "randle", "rang", "range", "ranged", "rank", "ranking", "rankled", "rapacity", "rapid", "rapidly", "rare", "rarely", "rash", "ratcliffe", "rate", "rates", "rathbone", "rather", "ratification", "ratified", "ration", "rational", "rations", "rats", "raw", "ray", "re", "reach", "reached", "reaches", "reaching", "reacted", "reaction", "read", "reader", "readers", "readily", "readiness", "reading", "ready", "real", "reality", "realize", "realized", "realizing", "really", "realm", "reappeared", "reappropriate", "rear", "rearranged", "reason", "reasonable", "reasonably", "reasoning", "reasons", "rebelled", "rebellion", "rebuild", "rebuilding", "rebuilt", "rebuke", "recall", "recalled", "recapitulation", "recapture", "recaptured", "receipt", "receipts", "receive", "received", "receiver", "receivers", "receives", "receiving", "recent", "recently", "receptacle", "reception", "recipient", "recites", "reckless", "recklessly", "recklessness", "reckoned", "recognition", "recognize", "recognized", "recognizes", "recognizing", "recollectedness", "recollection", "recommence", "recommend", "recommendation", "recommendations", "recommended", "recommends", "reconstruct", "reconstructed", "reconstructing", "reconstruction", "record", "recorded", "recorder", "recorders", "recording", "records", "recover", "recovered", "recovery", "recruited", "recur", "recurrence", "red", "redeem", "redesdales", "redpath", "redpaths", "reduce", "reduced", "reduction", "reed", "reeds", "reels", "refer", "reference", "references", "referral", "referrals", "referred", "referring", "refers", "reflect", "reflected", "reflection", "reflects", "reflex", "reform", "reformation", "reformatory", "reformers", "reforms", "refractory", "refrain", "refreshed", "refuge", 
"refuges", "refuse", "refused", "refusing", "refuted", "regained", "regard", "regarded", "regarding", "regardless", "regards", "regime", "register", "registers", "registrar", "registration", "regret", "regular", "regularly", "regulated", "regulation", "regulations", "reid", "reids", "reign", "reigned", "reigns", "reinforced", "reiterate", "reiterating", "reiteration", "rejected", "rejecting", "rejection", "rejoiced", "relapse", "relapsed", "relapsing", "relate", "related", "relating", "relation", "relations", "relationship", "relationships", "relative", "relatively", "relatives", "relax", "relaxation", "relaxed", "release", "released", "relegated", "reliable", "reliance", "relied", "relief", "reliefs", "relieve", "relieved", "religion", "religious", "relish", "relocation", "reluctance", "reluctantly", "rely", "remain", "remainder", "remained", "remaining", "remains", "remark", "remarkable", "remarkably", "remarked", "remarks", "remediable", "remedial", "remedied", "remedies", "remedy", "remember", "remembered", "remind", "reminded", "remington", "remissness", "remonstrance", "remonstrate", "remorse", "remoteness", "removal", "remove", "removed", "removing", "remunerated", "remunerative", "render", "rendered", "renders", "renewed", "renounce", "rent", "rental", "rentals", "rented", "reopened", "reorganization", "repair", "repaired", "repealed", "repeat", "repeated", "repeatedly", "repelled", "repentance", "repetition", "replace", "replaced", "replica", "replied", "replies", "reply", "report", "reported", "reporter", "reporting", "reports", "reprehended", "reprehensible", "reprehension", "representations", "representative", "representatives", "represented", "representing", "represents", "repressed", "reprieve", "reprieved", "reproach", "reprobation", "reproduced", "reproduction", "reproves", "reptile", "repudiated", "repugnance", "repulsive", "reputation", "repute", "reputed", "request", "requested", "requesting", "requests", "require", "required", "requirement", "requirements", "requires", "requiring", "requisition", "rescue", "rescued", "research", "researches", "resemblance", "resemblances", "resemble", "resembled", "resembling", "resented", "reserve", "reserved", "reserves", "reside", "resided", "residence", "resident", "residents", "residing", "resignation", "resigned", "resist", "resistance", "resistant", "resisted", "resolution", "resolutions", "resolve", "resolved", "resorted", "resounded", "resources", "respect", "respectability", "respectable", "respected", "respecting", "respective", "respectively", "respects", "respiration", "respiratory", "respite", "respited", "respond", "responded", "response", "responses", "responsibilities", "responsibility", "responsible", "rest", "rested", "resting", "restless", "restoration", "restore", "restored", "restrain", "restraining", "restraint", "restraints", "restricted", "restrictions", "restrictive", "result", "resulted", "resulting", "results", "resume", "resumed", "resurrectionist", "retailed", "retain", "retained", "retaining", "retains", "reticent", "retire", "retired", "retirement", "retiring", "retorted", "retouched", "retouching", "retrace", "retrenched", "retribution", "retrograde", "return", "returned", "returning", "returns", "rev", "reveal", "revealed", "revealing", "reveals", "revelations", "reveling", "revelries", "revelry", "revels", "revenge", "revenues", "reverend", "reverse", "reversed", "review", "reviewed", "reviewing", "reviews", "revill", "revision", "revival", "revive", "revived", "revolution", "revolutionary", 
"revolutionize", "revolver", "reward", "rewarded", "reynolds", "rheumatism", "rice", "rich", "richard", "richly", "richmond", "richness", "rick", "rid", "ridden", "ride", "riders", "ridicule", "ridiculous", "riding", "riff", "rifle", "rifled", "rifleman", "rifles", "rifling", "right", "rightful", "rights", "rightwing", "rigid", "rigidly", "ring", "ringleaders", "rings", "riot", "rioters", "rioting", "riotous", "riots", "ripping", "rise", "risen", "rises", "rising", "risk", "risks", "river", "rivers", "road", "roar", "roared", "roaring", "roast", "rob", "robarts", "robbed", "robberies", "robbery", "robert", "roberts", "robson", "robsons", "rochester", "rod", "rode", "rods", "roe", "roehampton", "rogues", "roistering", "role", "roles", "roll", "rolled", "rolling", "rolls", "rolt", "roman", "romanes", "rome", "romeos", "romilly", "ronald", "roof", "roofs", "room", "roominghouse", "rooms", "roosevelt", "root", "rooted", "roots", "rope", "ropes", "rose", "rosy", "rotten", "rouge", "rough", "roughly", "round", "rounded", "rounder", "rounds", "roupell", "roupells", "rouse", "roused", "route", "routes", "routine", "roux", "row", "rowley", "rows", "roy", "royal", "royally", "rubeus", "rude", "rudely", "rudiment", "rudimentary", "rudiments", "rufus", "rug", "rugeley", "rugs", "ruin", "ruined", "ruins", "rule", "rulers", "rules", "ruling", "run", "running", "runs", "rush", "rushed", "russell", "russells", "russia", "russian", "russians", "rustling", "ruth", "s", "saccharine", "sack", "sacred", "sacrifice", "sacrificing", "sacrilege", "safe", "safeguard", "safeguards", "safely", "safer", "safes", "safest", "safety", "sagacity", "said", "sailing", "sailor", "sailors", "sake", "salaries", "salary", "sale", "sales", "salt", "salts", "salutary", "salute", "sam", "same", "sample", "samples", "samuel", "san", "sanctioned", "sanctions", "sand", "sandwiches", "sane", "sank", "santa", "santos", "sap", "sarah", "sarcasms", "sash", "sat", "satisfaction", "satisfactorily", "satisfactory", "satisfied", "satisfy", "satisfying", "sattler", "saturated", "saturday", "saturnalia", "saucepan", "savage", "savagely", "save", "saved", "saving", "savings", "saw", "saward", "sawards", "sawyer", "say", "saying", "says", "scaffold", "scale", "scan", "scandal", "scanned", "scanning", "scanty", "scarcely", "scarcity", "scarlett", "scattered", "scavenger", "scene", "scenes", "schedule", "scheduled", "scheme", "scheming", "schoeffer", "school", "schoolmaster", "schoolmasters", "schools", "schussler", "science", "scientific", "scoggins", "scope", "scorched", "score", "scored", "scores", "scorn", "scorned", "scornful", "scotched", "scotia", "scotland", "scottish", "scrape", "scraped", "scream", "screamed", "screen", "screened", "screws", "scroll", "scruple", "scruples", "scrupulously", "scuffle", "sea", "sealed", "seaport", "search", "searched", "searches", "searching", "seas", "season", "seat", "seated", "seats", "sebastian", "seclusion", "second", "secondary", "secondly", "seconds", "secrecy", "secret", "secretaries", "secretary", "secrete", "secreted", "secretly", "section", "sections", "secure", "secured", "securely", "securing", "securities", "security", "sedition", "seduced", "seducer", "seduction", "see", "seeds", "seedy", "seeing", "seek", "seeking", "seeks", "seem", "seemed", "seeming", "seemingly", "seems", "seen", "sees", "seized", "seizing", "seldom", "select", "selected", "selection", "selective", "seleucia", "self", "sell", "seller", "selling", "semblance", "semi", "senate", "senator", "send", "sending", "sensation", 
"sensational", "sense", "senseless", "sensibly", "sensitive", "sent", "sentence", "sentenced", "sentences", "sentiment", "sentry", "separate", "separated", "separately", "separating", "separation", "september", "sepulchre", "sepulchres", "sergeant", "sergeants", "serial", "series", "serious", "seriously", "seriousness", "sermon", "sermons", "serpent", "serpents", "servant", "servants", "serve", "served", "service", "services", "serving", "servitude", "session", "sessions", "set", "sets", "setting", "settled", "settlement", "settles", "settling", "seven", "seventeen", "seventeenth", "seventh", "seventy", "several", "severe", "severely", "severity", "sewers", "sexes", "sexual", "sgt", "shackles", "shades", "shadow", "shadows", "shaft", "shaken", "shakes", "shaking", "shall", "sham", "shame", "shameful", "shaneyfelt", "shanklin", "shape", "shaped", "shapeliness", "shaping", "share", "shared", "shares", "sharings", "sharp", "sharpened", "sharply", "sharpshooter", "shattered", "shaw", "shawl", "she", "shed", "sheep", "sheet", "sheets", "shell", "shelley", "shells", "sheriff", "sheriffs", "shield", "shift", "shifts", "shilling", "shillings", "ship", "shipped", "shipping", "ships", "shipwrecks", "shirt", "shirts", "shock", "shocked", "shocking", "shockingly", "shoe", "shoemaker", "shoemaking", "shoes", "shoestore", "shook", "shoot", "shooting", "shop", "shops", "shore", "shores", "short", "shortcomings", "shortened", "shorter", "shortly", "shot", "shotgun", "shots", "should", "shoulder", "shoulders", "shouldnt", "shout", "shouted", "shouting", "shouts", "shovel", "shoving", "show", "showed", "showing", "shown", "shows", "showy", "shrewd", "shrewsbury", "shrigley", "shrink", "shriveled", "shropshire", "shrubbery", "shrunk", "shudder", "shut", "shutters", "shutting", "sick", "sickening", "sickness", "side", "sided", "sides", "sidewalk", "sideways", "sidmouth", "siege", "siegel", "sift", "sifted", "sight", "sighted", "sighting", "sights", "sign", "signal", "signature", "signatures", "signed", "significance", "significant", "signs", "silence", "silent", "silhouette", "silk", "silly", "silver", "silversmiths", "similar", "similarities", "similarity", "similarly", "simmons", "simple", "simpler", "simplest", "simpleton", "simply", "simulated", "simultaneously", "sinacherib", "since", "sincere", "sincerely", "sine", "sing", "singing", "single", "singleness", "sink", "sinking", "sinks", "sinner", "sins", "sir", "sirens", "sister", "sit", "site", "sitting", "sittings", "situated", "situation", "situations", "six", "sixpence", "sixpenny", "sixteen", "sixteenth", "sixth", "sixty", "sizable", "size", "sized", "sizes", "skeletal", "skeleton", "sketch", "skilful", "skill", "skilled", "skillful", "skin", "skirt", "skittles", "skull", "sky", "slack", "slacken", "slackness", "slacks", "slain", "slanting", "slaughterhouses", "slavery", "slaves", "slayer", "sleep", "sleeper", "sleepers", "sleeping", "sleeve", "sleeves", "slender", "slept", "slew", "slide", "slight", "slightest", "slightly", "sligo", "slip", "slipped", "slipping", "slipshod", "slop", "sloping", "slovenly", "slow", "slowed", "slowly", "small", "smaller", "smallest", "smart", "smartly", "smell", "smethurst", "smethursts", "smiled", "smiles", "smirking", "smith", "smithfield", "smiths", "smoke", "smoked", "smoking", "smoldering", "smooth", "smug", "smuggled", "smugglers", "snail", "snakes", "snapping", "snatched", "snatcher", "snatchers", "snatches", "sneered", "snow", "snuff", "so", "soames", "soap", "sobbing", "sober", "sobriety", "social", 
"socialist", "society", "societys", "soda", "soft", "soil", "sokolow", "sold", "soldier", "soldiers", "sole", "solely", "solemn", "solemnity", "soles", "solicited", "solicitor", "solid", "solidity", "solitary", "solitude", "solomons", "solution", "some", "somebody", "someone", "something", "sometime", "sometimes", "somewhat", "somewhere", "son", "song", "songs", "sons", "soon", "sooner", "sophisticated", "sorrels", "sorrow", "sorrowful", "sorry", "sort", "sorts", "sought", "soul", "sound", "sounded", "soundly", "sounds", "soup", "source", "sources", "south", "southampton", "southeast", "southern", "southwark", "southwest", "southwesterly", "sovereign", "sovereigns", "soviet", "soviets", "space", "spaced", "spaces", "spacing", "spacious", "span", "spaniards", "spanish", "spare", "spared", "sparing", "sparrow", "spasmodic", "speak", "speaker", "speakers", "speaking", "speaks", "special", "specialist", "specialization", "specially", "specie", "species", "specific", "specifically", "specified", "specify", "specimen", "spectacle", "spectators", "speculation", "speculations", "speculator", "speech", "speed", "speedily", "speeding", "speeds", "speedy", "spelling", "spencer", "spend", "spent", "spikes", "spiky", "spilled", "spine", "spinning", "spires", "spirit", "spirits", "spiritual", "spirituous", "spit", "spite", "splendid", "spoil", "spoke", "spoken", "spokes", "sponge", "sponsor", "spontaneity", "spoon", "spoons", "sport", "sporting", "spot", "spread", "spring", "springs", "spurious", "spy", "squalid", "squalor", "square", "squarely", "squatting", "squeeze", "st", "stab", "stabbed", "stable", "stables", "stack", "stacked", "stadium", "staff", "staffordshire", "stage", "stages", "staggers", "stagnant", "stain", "stained", "staircase", "staircases", "stairs", "stairway", "stairwell", "stake", "stalls", "stamp", "stamped", "stamps", "stand", "standard", "standards", "standing", "standpoint", "stands", "star", "starch", "starchy", "stare", "stared", "stars", "start", "started", "starting", "startled", "starvation", "starve", "starved", "starving", "state", "stated", "statement", "statements", "states", "stating", "station", "stationed", "stationers", "statistics", "stature", "status", "statute", "statutory", "stauntons", "staves", "stay", "stayed", "staying", "steadfastly", "steadily", "steady", "steal", "stealer", "stealing", "steam", "steamer", "steele", "stem", "stemmons", "stench", "step", "stepped", "stepping", "steps", "sternly", "stevenson", "steward", "stick", "sticking", "stiff", "stiffly", "stigmatized", "still", "stillness", "stimulants", "stimulated", "stimulating", "stimuli", "stint", "stir", "stirred", "stock", "stockdale", "stockings", "stocks", "stole", "stolen", "stomach", "stomata", "stombaugh", "stombaughs", "stone", "stones", "stood", "stool", "stooped", "stop", "stopped", "stopping", "stops", "stopwatch", "storage", "store", "stored", "stores", "stories", "storm", "stormy", "stortford", "story", "stout", "stoutly", "strahan", "straight", "strain", "strained", "strand", "strange", "strangely", "stranger", "strangers", "strangled", "strangulation", "strap", "straps", "strasburg", "stratagem", "strategy", "straw", "streak", "streaks", "stream", "street", "streets", "strength", "strengthen", "strengthened", "strengthening", "strenuously", "stressed", "stretch", "stretcher", "stretchers", "stretches", "stretching", "strict", "strictly", "strictures", "strike", "striking", "string", "stringent", "strings", "strip", "striped", "stripped", "stroke", "strong", "stronger", 
"strongest", "strongly", "strove", "struck", "structural", "structure", "structures", "struggle", "struggled", "struggles", "struggling", "strutton", "strychnia", "stuckey", "studded", "students", "studied", "studies", "study", "studying", "stuff", "stuffs", "stumbled", "stumbles", "stupidity", "sturges", "style", "styled", "sub", "subdue", "subdued", "subiaco", "subject", "subjected", "subjects", "submission", "submit", "submitted", "subordinate", "subordinates", "subscribe", "subscribed", "subscription", "subscriptions", "subsequent", "subsequently", "subsided", "substance", "substances", "substantial", "substantially", "substantiated", "substitute", "substituted", "suburban", "suburbs", "subversive", "succeed", "succeeded", "succeeding", "success", "successful", "successfully", "succession", "successive", "successively", "successor", "successors", "succumbed", "such", "sucked", "sudden", "suddenly", "sue", "sued", "suffer", "suffered", "sufferer", "suffering", "sufferings", "suffice", "sufficient", "sufficiently", "suffocated", "suffocation", "suffolk", "sugar", "suggest", "suggested", "suggesting", "suggestion", "suggestions", "suggestive", "suggests", "suicide", "suicides", "suit", "suitable", "suited", "sullivan", "sullivans", "sum", "summarized", "summary", "summed", "summer", "summing", "summit", "summoned", "sums", "sun", "sunday", "sundays", "sung", "sunk", "sunlight", "sunshine", "superficial", "superfluous", "superimposed", "superintendence", "superintendent", "superior", "superiors", "superseded", "supervised", "supervising", "supervision", "supervisor", "supped", "supplement", "supplied", "supplies", "supply", "supplying", "support", "supported", "supporters", "supporting", "supports", "suppose", "supposed", "supposing", "supposition", "suppress", "suppression", "suppuration", "supreme", "sure", "surely", "surface", "surgeon", "surgeons", "surgery", "surgical", "surmounted", "surpassed", "surplus", "surprise", "surprised", "surprising", "surrender", "surrendered", "surrounded", "surrounding", "surroundings", "surveillance", "survey", "surveying", "surveyor", "surveys", "survival", "survive", "survived", "susannah", "suspect", "suspects", "suspended", "suspense", "suspension", "suspicion", "suspicions", "suspicious", "sustained", "sutherland", "sutton", "suturing", "swab", "swandown", "swear", "swearing", "sweep", "sweeper", "sweeping", "sweet", "sweethearts", "swelling", "swellings", "swept", "sweynheim", "swimming", "swindler", "swindlers", "swing", "swinging", "sword", "swords", "swore", "sworn", "sydney", "sympathies", "sympathy", "symptoms", "system", "systematic", "systematically", "systems", "t", "table", "tables", "tablespoonful", "tablespoonfuls", "tactics", "tadpole", "taffir", "taft", "tagus", "tail", "tailor", "tailoring", "tailors", "take", "taken", "taker", "takers", "takes", "taking", "tale", "talent", "tales", "talk", "talked", "talking", "talks", "tall", "tampa", "tangible", "tap", "tape", "tapster", "target", "targets", "tarpey", "tarpeys", "task", "tasks", "taste", "tastes", "tattered", "taught", "tavern", "taverns", "tax", "taxed", "taxes", "taxi", "taxicab", "taylor", "tea", "teach", "teaching", "team", "tear", "tearing", "teased", "teaspoonful", "teaspoonfuls", "technical", "techniques", "technological", "technology", "ted", "tedious", "teenagers", "teeth", "teleological", "telephone", "telephoned", "telescopic", "television", "tell", "telling", "tells", "temper", "temperature", "tempered", "temple", "temples", "temporarily", "temporary", "temptation", 
"temptations", "tempted", "ten", "tenants", "tend", "tended", "tendencies", "tendency", "tender", "tenderness", "tending", "tennis", "tensions", "tent", "tentative", "tentatively", "tenth", "tenths", "term", "terminal", "terminated", "termination", "terms", "terrace", "terrible", "terrified", "territory", "terror", "terrors", "test", "tested", "tester", "testified", "testify", "testifying", "testimony", "tests", "tetanic", "tetanus", "texas", "text", "texture", "thain", "thames", "than", "thank", "thankfulness", "thanks", "thanksgiving", "thanksgivings", "that", "the", "theatre", "theatres", "theatrical", "thee", "theft", "thefts", "their", "theirs", "them", "theme", "themselves", "then", "thence", "theodore", "theological", "theory", "there", "thereabout", "thereafter", "thereby", "therefore", "therein", "thereof", "thereon", "thereto", "thereupon", "these", "they", "thick", "thickening", "thickness", "thief", "thievery", "thieves", "thieving", "thigh", "thin", "thine", "thing", "things", "think", "thinking", "thinness", "thinning", "third", "thirstiness", "thirteen", "thirteenpence", "thirties", "thirtieth", "thirty", "this", "thistlewood", "thistlewoods", "thither", "thomas", "thornberry", "thornley", "thorough", "thoroughly", "those", "thou", "though", "thought", "thoughtful", "thoughts", "thousand", "thousands", "thread", "threading", "threat", "threatened", "threatening", "threats", "three", "threw", "thrice", "thrilling", "throat", "throes", "throne", "throng", "thronged", "through", "throughout", "throw", "throwing", "thrown", "throws", "thrust", "thursday", "thurtell", "thus", "thwart", "thy", "ticket", "tickets", "tidd", "tidy", "tie", "tied", "tiers", "ties", "tight", "tightly", "tigris", "till", "tilling", "timber", "time", "timed", "timely", "times", "tiny", "tippit", "tippits", "tired", "tissue", "tissues", "title", "to", "tobacco", "tobacconist", "tocqueville", "today", "todays", "toe", "toed", "toes", "together", "toil", "tokens", "told", "tolerable", "tolerate", "toll", "tolls", "tom", "tomorrow", "tone", "tones", "tonight", "too", "took", "tool", "tools", "top", "topics", "tops", "tore", "tormented", "tormentor", "tormentors", "torn", "tossing", "total", "totally", "touch", "touched", "touching", "tough", "toughness", "tour", "touts", "toward", "towards", "towel", "towels", "tower", "towers", "town", "towns", "township", "trace", "traced", "traces", "tracheotomy", "track", "tracked", "tracks", "tracts", "trade", "trader", "traders", "trades", "trading", "tradition", "traditional", "traditions", "traffic", "tragedian", "tragedy", "tragic", "train", "trained", "training", "trait", "traitors", "tramped", "trampled", "transaction", "transactions", "transcript", "transfer", "transferred", "transferring", "transfers", "transient", "transit", "transition", "transitional", "translated", "transmitted", "transmitting", "transparent", "transport", "transportation", "transported", "transports", "trap", "trauma", "travel", "traveled", "travelers", "traveling", "travels", "traversed", "traversing", "tray", "tread", "treading", "treason", "treasonable", "treasury", "treat", "treated", "treating", "treatment", "treble", "trebled", "trees", "tremble", "trembled", "trembles", "tremendous", "trenchant", "trenched", "trial", "trials", "triangle", "tribunals", "tribute", "trick", "trickled", "trickling", "tried", "tries", "trifle", "trifling", "trigger", "trip", "triple", "tripped", "trips", "trivial", "trodden", "troops", "trot", "trouble", "trousers", "truancy", "truck", "truculent", 
"true", "truly", "trump", "trunk", "truss", "trust", "trusted", "trustworthy", "trusty", "truth", "try", "trying", "tub", "tube", "tubes", "tubs", "tucked", "tuft", "tumult", "tumultuous", "turf", "turk", "turkey", "turn", "turned", "turner", "turners", "turning", "turnkey", "turnkeys", "turnpike", "turns", "turtle", "tweed", "twelfth", "twelve", "twenties", "twenty", "twice", "twist", "two", "tyburn", "tying", "tylers", "type", "types", "typhus", "typical", "typography", "tyrannies", "u", "udalric", "ugliness", "ugly", "ulm", "ultimate", "ultimately", "ultra", "unable", "unaccompanied", "unaffected", "unaffectedly", "unanimous", "unanswerable", "unanswered", "unattainable", "unauthorized", "unavailing", "unbecoming", "unbroken", "unceasing", "uncertain", "uncertainty", "unchanged", "unchecked", "unclaimed", "uncle", "uncleanliness", "uncleanly", "uncleanness", "unclothed", "uncomfortable", "uncommon", "uncompromising", "unconcern", "unconscious", "unconsciously", "unconstitutional", "uncontaminated", "uncontrolled", "unconvicted", "uncovered", "undecided", "undefended", "undeniable", "under", "underfoot", "undergo", "undergoing", "undergone", "underground", "underlying", "undermine", "underpass", "underside", "understand", "understanding", "understood", "undertake", "undertaken", "undertaker", "undertaking", "underway", "underwriters", "undesirable", "undeterred", "undisturbed", "undone", "undoubtedly", "undue", "undulate", "unduly", "uneasiness", "uneasy", "unemployed", "unemployment", "unequal", "unequivocally", "uneventful", "unexamined", "unexpected", "unexpectedly", "unfair", "unfairly", "unfavorable", "unfeeling", "unfit", "unflagging", "unflinching", "unfortunate", "unfortunately", "unfrequent", "ungovernable", "unhappily", "unhappy", "unhealthy", "uniform", "uniformity", "uninteresting", "union", "unique", "unison", "unit", "united", "units", "unity", "universal", "universally", "universe", "university", "unjust", "unknown", "unlawful", "unless", "unlike", "unlikely", "unlimited", "unloaded", "unlocked", "unlocking", "unmanly", "unmarked", "unmindful", "unmistakably", "unmixed", "unmoved", "unnatural", "unnecessarily", "unnecessary", "unnerve", "unnoticed", "unoccupied", "unparalleled", "unpleasant", "unpopular", "unpracticed", "unprotected", "unprovided", "unquote", "unrelenting", "unremedied", "unremitting", "unreservedly", "unrestrained", "unrestricted", "unsafe", "unsatisfactory", "unscrupulous", "unsearched", "unseemly", "unsexed", "unskilful", "unsound", "unsoundness", "unstable", "unsubstantiated", "unsuccessful", "unsuccessfully", "unsuited", "unsupervised", "until", "unto", "untouched", "untoward", "untried", "unusual", "unusually", "unventilated", "unwarrantable", "unwary", "unwholesome", "unwilling", "unworthy", "up", "upon", "upper", "uppermost", "upright", "uprights", "uproar", "uproarious", "upset", "upside", "upstairs", "upward", "upwards", "urge", "urged", "urgent", "urgently", "urges", "urging", "us", "usa", "usage", "use", "used", "useful", "useless", "uses", "using", "ussr", "usual", "usually", "usurped", "utilitarian", "utility", "utilize", "utilized", "utilizing", "utmost", "utter", "uttered", "uttering", "utterly", "utters", "v", "vacated", "vagrants", "vague", "vain", "valet", "valets", "valid", "validity", "valley", "valuable", "valuables", "value", "valued", "van", "vanished", "vanishes", "vans", "vantage", "variation", "variations", "varied", "variety", "various", "vartos", "vary", "varying", "vast", "vastly", "vault", "vaulted", "vaunting", "vegetable", 
"vegetables", "vehicle", "vehicles", "veil", "vein", "vended", "vendor", "vendors", "venetian", "venice", "ventilating", "ventilation", "ventilators", "verbal", "verdict", "verified", "verify", "versed", "vertebral", "vertebrate", "vertebrates", "vertically", "very", "vessel", "vessels", "vested", "vestibule", "vestiges", "vexed", "via", "viaduct", "vice", "vicinity", "vicious", "vicissitudes", "victim", "victims", "victoria", "victuals", "view", "viewed", "viewpoint", "views", "vigilance", "vigorous", "vigorously", "vile", "vileness", "vilest", "vilification", "village", "villain", "villains", "vindelin", "vines", "violated", "violation", "violations", "violence", "violent", "violently", "virginia", "virtually", "virtue", "virtuous", "visa", "visas", "viscose", "visible", "vision", "visit", "visitation", "visitations", "visited", "visiting", "visitor", "visitors", "visits", "visual", "vital", "vitiate", "vitriol", "viz", "vociferously", "voebel", "voice", "voices", "volume", "voluminous", "voluntarily", "voluntary", "volunteer", "vomiting", "vote", "vouchsafed", "vowed", "voyage", "vulgar", "w", "wage", "wages", "wagner", "wagon", "wainwright", "wainwrights", "waist", "waistcoat", "wait", "waited", "waiters", "waiting", "wakefield", "wakefields", "waking", "wales", "walk", "walked", "walker", "walkers", "walking", "wall", "wallace", "wallaces", "walled", "wallet", "walls", "walsall", "walter", "walthers", "wandering", "wanders", "wandsworth", "want", "wanted", "wanting", "wantonness", "war", "ward", "warden", "warder", "warders", "wards", "wardsman", "wardsmen", "warehouse", "warehouses", "wares", "warm", "warmed", "warmly", "warned", "warning", "warnings", "warrant", "warrants", "warren", "warwick", "was", "wash", "washed", "washing", "washington", "wasnt", "waste", "wasted", "watch", "watched", "watches", "watching", "watchman", "water", "watercourse", "waterloo", "waters", "watery", "watkin", "watkins", "watson", "watts", "wave", "wax", "way", "ways", "wdsu", "we", "weak", "weakened", "weaker", "weakest", "weakly", "wealth", "wealthy", "weapon", "weapons", "wear", "weare", "wearer", "wearing", "weary", "weather", "weavers", "weaving", "webster", "wedded", "wedding", "wednesday", "wednesdays", "weed", "week", "weekend", "weekly", "weeks", "weigh", "weighed", "weight", "weighted", "weights", "welcome", "welcomed", "welfare", "well", "welshman", "went", "were", "wesley", "wesson", "west", "westbrook", "western", "westminster", "whale", "whaley", "whaleys", "wharf", "what", "whatever", "wheat", "wheel", "wheeled", "wheeler", "wheels", "when", "whence", "whenever", "where", "whereabouts", "whereas", "whereof", "whereupon", "wherever", "whether", "which", "while", "whiling", "whilst", "whisper", "whispered", "whistles", "whistling", "whitchurch", "white", "whitecross", "whitehall", "whites", "whitewashed", "whitewashing", "whither", "whitworth", "who", "whoever", "whole", "wholesale", "wholesome", "wholly", "whom", "whomsoever", "whose", "why", "wickedness", "wide", "widely", "wider", "widespread", "widow", "widower", "widows", "width", "wife", "wifes", "wig", "wilberforce", "wild", "wildest", "will", "willful", "william", "williams", "willing", "willingly", "willingness", "willoughby", "wills", "wilson", "winchester", "wind", "winded", "winding", "windlass", "window", "windows", "windowsill", "winds", "windsor", "wine", "wing", "wings", "winked", "winston", "winter", "wiped", "wire", "wiry", "wisdom", "wise", "wiser", "wish", "wished", "wishes", "wishing", "with", "withdrawal", 
"withdrawing", "withdrawn", "withdraws", "withdrew", "within", "without", "witness", "witnessed", "witnesses", "wittmus", "wives", "woebegone", "woman", "womans", "women", "womens", "won", "wonder", "wonders", "wont", "wood", "woodcuts", "woodcutters", "wooden", "woodstock", "woody", "wool", "woolen", "woolwich", "word", "worde", "words", "wore", "work", "worked", "worker", "workers", "workhouse", "working", "workman", "workmen", "works", "workshops", "world", "worn", "worry", "worse", "worship", "worst", "worth", "worthy", "would", "wouldnt", "wound", "wounded", "wounds", "wrapped", "wrapper", "wrapping", "wrestling", "wretch", "wretched", "wretchedness", "wretches", "wrist", "wrists", "writ", "write", "writer", "writers", "writes", "writing", "writings", "writs", "written", "wrong", "wronged", "wrongfully", "wrongs", "wrote", "wrought", "wynkyn", "wynn", "x", "yarborough", "yard", "yards", "yarmouth", "ye", "year", "years", "yeast", "yell", "yelled", "yellow", "yells", "yeoman", "yes", "yesterday", "yet", "yield", "yoke", "yolk", "york", "yorks", "yorkshire", "you", "youll", "young", "youngblood", "youngbloods", "younger", "youngest", "youngster", "your", "yourself", "youth", "youths", "zahm", "zapruder", "zeal", "zeiner", "zeiners", "zero", "zipper", "zoology", "zopyrus", "zulueta"] \ No newline at end of file diff --git a/text_to_speech/checkpoints/ljspeech/ps_adv_baseline/config.yaml b/text_to_speech/checkpoints/ljspeech/ps_adv_baseline/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f26046f440d1e6fbc04aaaa3257a769ec89be9f7 --- /dev/null +++ b/text_to_speech/checkpoints/ljspeech/ps_adv_baseline/config.yaml @@ -0,0 +1,280 @@ +accumulate_grad_batches: 1 +add_word_pos: true +amp: false +audio_num_mel_bins: 80 +audio_sample_rate: 22050 +base_config: +- egs/datasets/audio/lj/base_text2mel.yaml +- egs/egs_bases/tts/ps_flow.yaml +binarization_args: + min_sil_duration: 0.1 + shuffle: false + test_range: + - 0 + - 523 + train_range: + - 871 + - -1 + trim_eos_bos: false + valid_range: + - 523 + - 871 + with_align: true + with_f0: true + with_f0cwt: false + with_linear: false + with_spk_embed: false + with_wav: false +binarizer_cls: data_gen.tts.base_binarizer.BaseBinarizer +binary_data_dir: data/binary/ljspeech_clipspeech +check_val_every_n_epoch: 10 +clip_grad_norm: 1 +clip_grad_value: 0 +conv_use_pos: false +debug: false +dec_dilations: +- 1 +- 1 +- 1 +- 1 +dec_ffn_kernel_size: 9 +dec_inp_add_noise: false +dec_kernel_size: 5 +dec_layers: 4 +dec_post_net_kernel: 3 +decoder_rnn_dim: 0 +decoder_type: conv +detach_postflow_input: true +disc_interval: 1 +disc_lr: 0.0001 +disc_norm: in +disc_reduction: stack +disc_start_steps: 0 +disc_win_num: 3 +discriminator_optimizer_params: + eps: 1.0e-06 + weight_decay: 0.0 +discriminator_scheduler_params: + gamma: 0.5 + step_size: 40000 +dropout: 0.0 +ds_name: ljspeech +ds_workers: 1 +dur_level: word +dur_predictor_kernel: 5 +dur_predictor_layers: 3 +enc_dec_norm: ln +enc_dilations: +- 1 +- 1 +- 1 +- 1 +enc_ffn_kernel_size: 5 +enc_kernel_size: 5 +enc_layers: 4 +enc_post_net_kernel: 3 +enc_pre_ln: true +enc_prenet: true +encoder_K: 8 +encoder_type: rel_fft +endless_ds: true +eval_max_batches: -1 +f0_max: 600 +f0_min: 80 +ffn_act: gelu +ffn_hidden_size: 768 +fft_size: 1024 +fmax: 7600 +fmin: 80 +frames_multiple: 4 +fvae_dec_n_layers: 4 +fvae_decoder_type: conv +fvae_enc_dec_hidden: 192 +fvae_enc_n_layers: 8 +fvae_encoder_type: conv +fvae_kernel_size: 5 +fvae_noise_scale: 1.0 +fvae_strides: 4 +gen_dir_name: '' +griffin_lim_iters: 
30 +hidden_size: 192 +hop_size: 256 +infer: false +infer_post_glow: true +kl_min: 0.0 +kl_start_steps: 10000 +lambda_commit: 0.25 +lambda_energy: 0.1 +lambda_f0: 1.0 +lambda_kl: 1.0 +lambda_mel_adv: 0.05 +lambda_ph_dur: 0.1 +lambda_sent_dur: 0.0 +lambda_uv: 1.0 +lambda_word_dur: 1.0 +latent_size: 16 +layers_in_block: 2 +load_ckpt: '' +loud_norm: false +lr: 0.4 +max_epochs: 1000 +max_frames: 1548 +max_input_tokens: 1550 +max_sentences: 64 +max_tokens: 40000 +max_updates: 160001 +max_valid_sentences: 1 +max_valid_tokens: 60000 +mel_disc_hidden_size: 128 +mel_losses: l1:0.5|ssim:0.5 +mel_vmax: 1.5 +mel_vmin: -6 +min_frames: 0 +noise_scale: 0.8 +num_cache: 20000 +num_ckpt_keep: 2 +num_heads: 2 +num_sanity_val_steps: 0 +num_spk: 1 +num_valid_plots: 30 +optimizer_adam_beta1: 0.9 +optimizer_adam_beta2: 0.98 +out_wav_norm: false +pe_ffn_padding: SAME +pe_hidden_size: 256 +pe_predictor_hidden: -1 +pe_predictor_kernel: 5 +pitch_extractor: parselmouth +pitch_extractor_ckpt: checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt +pitch_key: pitch +pitch_norm: log +pitch_type: frame +post_flow_lr: 0.001 +post_glow_hidden: 192 +post_glow_kernel_size: 3 +post_glow_n_block_layers: 3 +post_glow_n_blocks: 12 +post_glow_training_start: 160000 +post_share_cond_layers: false +posterior_start_steps: 0 +predictor_dropout: 0.2 +predictor_grad: 0.02 +predictor_hidden: -1 +predictor_kernel: 5 +predictor_layers: 5 +preprocess_args: + add_eos_bos: true + mfa_group_shuffle: false + mfa_offset: 0.02 + nsample_per_mfa_group: 1000 + reset_phone_dict: true + reset_word_dict: true + save_sil_mask: true + txt_processor: en + use_mfa: true + vad_max_silence_length: 12 + wav_processors: [] + with_phsep: true +preprocess_cls: text_to_speech.egs.datasets.audio.lj.preprocess.LJPreprocess +print_nan_grads: false +prior_flow_hidden: 64 +prior_flow_kernel_size: 3 +prior_flow_n_blocks: 4 +processed_data_dir: data/processed/ljspeech +profile_infer: false +raw_data_dir: data/raw/LJSpeech-1.1 +ref_norm_layer: bn +rename_tmux: true +resume_from_checkpoint: 0 +save_best: false +save_codes: +- tasks +- modules +- egs +- research +save_f0: false +save_gt: true +scheduler: rsqrt +seed: 1234 +share_wn_layers: 4 +sigmoid_scale: false +sort_by_len: true +task_cls: research.clipspeech.tasks.ps_adv.PortaSpeechAdvTask +tb_log_interval: 100 +test_ids: +- 0 +- 1 +- 2 +- 3 +- 4 +- 5 +- 6 +- 7 +- 8 +- 9 +- 10 +- 11 +- 12 +- 13 +- 14 +- 15 +- 16 +- 17 +- 18 +- 19 +- 68 +- 70 +- 74 +- 87 +- 110 +- 172 +- 190 +- 215 +- 231 +- 294 +- 316 +- 324 +- 402 +- 422 +- 485 +- 500 +- 505 +- 508 +- 509 +- 519 +test_input_yaml: '' +test_num: 100 +test_set_name: test +text_encoder_postnet: true +train_set_name: train +train_sets: '' +two_stage: true +use_cond_proj: false +use_fvae: true +use_gt_dur: false +use_gt_f0: false +use_latent_cond: false +use_pitch_embed: false +use_pos_embed: true +use_post_flow: true +use_prior_flow: true +use_spk_embed: false +use_spk_id: false +use_txt_cond: true +use_uv: true +use_word_encoder: false +use_word_input: false +val_check_interval: 1 +valid_infer_interval: 1 +valid_monitor_key: val_loss +valid_monitor_mode: min +valid_set_name: valid +vocoder: HifiGAN +vocoder_ckpt: checkpoints/hifi_lj +warmup_updates: 8000 +weight_decay: 0 +win_size: 1024 +word_dict_size: 10000 +word_enc_layers: 4 +word_encoder_type: rel_fft +work_dir: checkpoints/ljspeech/ps_adv_baseline diff --git a/text_to_speech/checkpoints/ljspeech/ps_adv_baseline/model_ckpt_steps_160000.ckpt 
b/text_to_speech/checkpoints/ljspeech/ps_adv_baseline/model_ckpt_steps_160000.ckpt new file mode 100644 index 0000000000000000000000000000000000000000..af47e60ed55e7407cafc675044fe276cb59b8e4f --- /dev/null +++ b/text_to_speech/checkpoints/ljspeech/ps_adv_baseline/model_ckpt_steps_160000.ckpt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8c9703b21cdbb5434af4c3c8ece930c5640d077c21d441bb86f398c596c49b8b +size 287145604 diff --git a/text_to_speech/checkpoints/ljspeech/ps_adv_baseline/model_ckpt_steps_160001.ckpt b/text_to_speech/checkpoints/ljspeech/ps_adv_baseline/model_ckpt_steps_160001.ckpt new file mode 100644 index 0000000000000000000000000000000000000000..c1ada75c47ffe462ab7d2eba88855591f62dde33 --- /dev/null +++ b/text_to_speech/checkpoints/ljspeech/ps_adv_baseline/model_ckpt_steps_160001.ckpt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:256f02b8dfee462a84279d5e4b2ef7eb953ed22038563b413e0488589fc3f38b +size 287122802 diff --git a/text_to_speech/checkpoints/ljspeech/ps_adv_baseline/tb_logs/events.out.tfevents.1670317738.yw52.34246.0 b/text_to_speech/checkpoints/ljspeech/ps_adv_baseline/tb_logs/events.out.tfevents.1670317738.yw52.34246.0 new file mode 100644 index 0000000000000000000000000000000000000000..3a1491aa4c71ad51d54ddc6ab619bffc44d8972e --- /dev/null +++ b/text_to_speech/checkpoints/ljspeech/ps_adv_baseline/tb_logs/events.out.tfevents.1670317738.yw52.34246.0 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fa3b818dcbb46391fea94c1bff4c1b0be6f9dc9cc1e1698023b276c45631209b +size 40 diff --git a/text_to_speech/checkpoints/ljspeech/ps_adv_baseline/tb_logs/events.out.tfevents.1670318794.yw52.20660.0 b/text_to_speech/checkpoints/ljspeech/ps_adv_baseline/tb_logs/events.out.tfevents.1670318794.yw52.20660.0 new file mode 100644 index 0000000000000000000000000000000000000000..c419d85300cdeda8a44c210d8a29918a23340635 --- /dev/null +++ b/text_to_speech/checkpoints/ljspeech/ps_adv_baseline/tb_logs/events.out.tfevents.1670318794.yw52.20660.0 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:55fc9f3a68708d206c938200c9eac1055072afad4b6762a97234b65ff752635b +size 40 diff --git a/text_to_speech/checkpoints/ljspeech/ps_adv_baseline/tb_logs/events.out.tfevents.1670339314.yw52.20115.0 b/text_to_speech/checkpoints/ljspeech/ps_adv_baseline/tb_logs/events.out.tfevents.1670339314.yw52.20115.0 new file mode 100644 index 0000000000000000000000000000000000000000..760194c7aba77586504b1d02d0037c1e43e4fccf --- /dev/null +++ b/text_to_speech/checkpoints/ljspeech/ps_adv_baseline/tb_logs/events.out.tfevents.1670339314.yw52.20115.0 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4b8e3d6b78feada0c7cfe33288d37dea2b31832a280ef087f337492075fcba28 +size 95056 diff --git a/text_to_speech/checkpoints/ljspeech/ps_adv_baseline/tb_logs/events.out.tfevents.1670374945.yw52.28480.0 b/text_to_speech/checkpoints/ljspeech/ps_adv_baseline/tb_logs/events.out.tfevents.1670374945.yw52.28480.0 new file mode 100644 index 0000000000000000000000000000000000000000..d5c31b4c024fa17b6b5d0d1f4d8ffc113f5ed286 --- /dev/null +++ b/text_to_speech/checkpoints/ljspeech/ps_adv_baseline/tb_logs/events.out.tfevents.1670374945.yw52.28480.0 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a21bbc0ab034bf36b92f8285ce632ced92262932e85f1d830237beda315c7038 +size 64827392 diff --git a/text_to_speech/checkpoints/ljspeech/ps_adv_baseline/tb_logs/events.out.tfevents.1670397406.yw52.15473.0 
b/text_to_speech/checkpoints/ljspeech/ps_adv_baseline/tb_logs/events.out.tfevents.1670397406.yw52.15473.0 new file mode 100644 index 0000000000000000000000000000000000000000..781a6d99aa882df33a4da241f7b5c00988743648 --- /dev/null +++ b/text_to_speech/checkpoints/ljspeech/ps_adv_baseline/tb_logs/events.out.tfevents.1670397406.yw52.15473.0 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2d7fdcb5455a54f96a791c545cd31bcbed92a6bbd5c403220b6bce9ca8292580 +size 185837950 diff --git a/text_to_speech/checkpoints/ljspeech/ps_adv_baseline/tb_logs/events.out.tfevents.1670844284.mk-SYS-7048GR-TR.19039.0 b/text_to_speech/checkpoints/ljspeech/ps_adv_baseline/tb_logs/events.out.tfevents.1670844284.mk-SYS-7048GR-TR.19039.0 new file mode 100644 index 0000000000000000000000000000000000000000..60b374d6f084c661c9666701d4acdaaa7a3e80cb --- /dev/null +++ b/text_to_speech/checkpoints/ljspeech/ps_adv_baseline/tb_logs/events.out.tfevents.1670844284.mk-SYS-7048GR-TR.19039.0 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d12aa2b68a03a8eea200591a467fb306a653cdd5cd98d66ef1b5b63b9be26b98 +size 45507803 diff --git a/text_to_speech/checkpoints/ljspeech/ps_adv_baseline/terminal_logs/log_20221206170856.txt b/text_to_speech/checkpoints/ljspeech/ps_adv_baseline/terminal_logs/log_20221206170856.txt new file mode 100644 index 0000000000000000000000000000000000000000..4d42e8deb70ff19e3cebfb6177f2a27115bc93d3 --- /dev/null +++ b/text_to_speech/checkpoints/ljspeech/ps_adv_baseline/terminal_logs/log_20221206170856.txt @@ -0,0 +1,625 @@ +| Copied codes to checkpoints/1206_lj/ps_adv_baseline/codes/20221206170856. +| model Arch: PortaSpeech( + (dur_predictor): DurationPredictor( + (conv): ModuleList( + (0): Sequential( + (0): Conv1d(192, 192, kernel_size=(5,), stride=(1,), padding=(2,)) + (1): ReLU() + (2): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (3): Dropout(p=0.2, inplace=False) + ) + (1): Sequential( + (0): Conv1d(192, 192, kernel_size=(5,), stride=(1,), padding=(2,)) + (1): ReLU() + (2): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (3): Dropout(p=0.2, inplace=False) + ) + (2): Sequential( + (0): Conv1d(192, 192, kernel_size=(5,), stride=(1,), padding=(2,)) + (1): ReLU() + (2): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (3): Dropout(p=0.2, inplace=False) + ) + ) + (linear): Sequential( + (0): Linear(in_features=192, out_features=1, bias=True) + (1): Softplus(beta=1, threshold=20) + ) + ) + (length_regulator): LengthRegulator() + (ph_encoder): RelTransformerEncoder( + (emb): Embedding(79, 192, padding_idx=0) + (pre): ConvReluNorm( + (conv_layers): ModuleList( + (0): Conv1d(192, 192, kernel_size=(5,), stride=(1,), padding=(2,)) + (1): Conv1d(192, 192, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): Conv1d(192, 192, kernel_size=(5,), stride=(1,), padding=(2,)) + ) + (norm_layers): ModuleList( + (0): LayerNorm() + (1): LayerNorm() + (2): LayerNorm() + ) + (relu_drop): Sequential( + (0): ReLU() + (1): Dropout(p=0, inplace=False) + ) + (proj): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + ) + (encoder): Encoder( + (drop): Dropout(p=0.0, inplace=False) + (attn_layers): ModuleList( + (0): MultiHeadAttention( + (conv_q): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_k): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_v): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_o): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + (1): MultiHeadAttention( + (conv_q): Conv1d(192, 192, 
kernel_size=(1,), stride=(1,)) + (conv_k): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_v): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_o): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + (2): MultiHeadAttention( + (conv_q): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_k): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_v): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_o): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + (3): MultiHeadAttention( + (conv_q): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_k): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_v): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_o): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + ) + (norm_layers_1): ModuleList( + (0): LayerNorm() + (1): LayerNorm() + (2): LayerNorm() + (3): LayerNorm() + ) + (ffn_layers): ModuleList( + (0): FFN( + (conv_1): Conv1d(192, 768, kernel_size=(5,), stride=(1,), padding=(2,)) + (conv_2): Conv1d(768, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + (1): FFN( + (conv_1): Conv1d(192, 768, kernel_size=(5,), stride=(1,), padding=(2,)) + (conv_2): Conv1d(768, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + (2): FFN( + (conv_1): Conv1d(192, 768, kernel_size=(5,), stride=(1,), padding=(2,)) + (conv_2): Conv1d(768, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + (3): FFN( + (conv_1): Conv1d(192, 768, kernel_size=(5,), stride=(1,), padding=(2,)) + (conv_2): Conv1d(768, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + ) + (norm_layers_2): ModuleList( + (0): LayerNorm() + (1): LayerNorm() + (2): LayerNorm() + (3): LayerNorm() + ) + (last_ln): LayerNorm() + ) + ) + (ph2word_encoder): RelTransformerEncoder( + (pre): ConvReluNorm( + (conv_layers): ModuleList( + (0): Conv1d(192, 192, kernel_size=(5,), stride=(1,), padding=(2,)) + (1): Conv1d(192, 192, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): Conv1d(192, 192, kernel_size=(5,), stride=(1,), padding=(2,)) + ) + (norm_layers): ModuleList( + (0): LayerNorm() + (1): LayerNorm() + (2): LayerNorm() + ) + (relu_drop): Sequential( + (0): ReLU() + (1): Dropout(p=0, inplace=False) + ) + (proj): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + ) + (encoder): Encoder( + (drop): Dropout(p=0.0, inplace=False) + (attn_layers): ModuleList( + (0): MultiHeadAttention( + (conv_q): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_k): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_v): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_o): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + (1): MultiHeadAttention( + (conv_q): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_k): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_v): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_o): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + (2): MultiHeadAttention( + (conv_q): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_k): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_v): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_o): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + (3): MultiHeadAttention( + (conv_q): Conv1d(192, 192, 
kernel_size=(1,), stride=(1,)) + (conv_k): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_v): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_o): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + ) + (norm_layers_1): ModuleList( + (0): LayerNorm() + (1): LayerNorm() + (2): LayerNorm() + (3): LayerNorm() + ) + (ffn_layers): ModuleList( + (0): FFN( + (conv_1): Conv1d(192, 192, kernel_size=(5,), stride=(1,), padding=(2,)) + (conv_2): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + (1): FFN( + (conv_1): Conv1d(192, 192, kernel_size=(5,), stride=(1,), padding=(2,)) + (conv_2): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + (2): FFN( + (conv_1): Conv1d(192, 192, kernel_size=(5,), stride=(1,), padding=(2,)) + (conv_2): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + (3): FFN( + (conv_1): Conv1d(192, 192, kernel_size=(5,), stride=(1,), padding=(2,)) + (conv_2): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + ) + (norm_layers_2): ModuleList( + (0): LayerNorm() + (1): LayerNorm() + (2): LayerNorm() + (3): LayerNorm() + ) + (last_ln): LayerNorm() + ) + ) + (sin_pos): SinusoidalPosEmb() + (enc_pos_proj): Linear(in_features=384, out_features=192, bias=True) + (dec_query_proj): Linear(in_features=384, out_features=192, bias=True) + (dec_res_proj): Linear(in_features=384, out_features=192, bias=True) + (attn): MultiheadAttention( + (out_proj): Linear(in_features=192, out_features=192, bias=False) + ) + (text_encoder_postnet): ConvBlocks( + (res_blocks): Sequential( + (0): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + (1): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + (2): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + ) + (last_norm): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (post_net1): Conv1d(192, 192, 
kernel_size=(3,), stride=(1,), padding=(1,)) + ) + (fvae): FVAE( + (g_pre_net): Sequential( + (0): Conv1d(192, 192, kernel_size=(8,), stride=(4,), padding=(2,)) + ) + (encoder): FVAEEncoder( + (pre_net): Sequential( + (0): Conv1d(80, 192, kernel_size=(8,), stride=(4,), padding=(2,)) + ) + (nn): ConditionalConvBlocks( + (res_blocks): Sequential( + (0): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + (1): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + (2): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + (3): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + (4): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + (5): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): 
LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + (6): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + (7): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + ) + (last_norm): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (post_net1): Conv1d(192, 192, kernel_size=(3,), stride=(1,), padding=(1,)) + (g_prenet): Conv1d(192, 192, kernel_size=(3,), stride=(1,), padding=(1,)) + ) + (out_proj): Conv1d(192, 32, kernel_size=(1,), stride=(1,)) + ) + (prior_flow): ResFlow( + (flows): ModuleList( + (0): CouplingLayer( + (pre): Conv1d(8, 64, kernel_size=(1,), stride=(1,)) + (enc): WN( + (in_layers): ModuleList( + (0): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + (1): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + (2): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + (3): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + ) + (res_skip_layers): ModuleList( + (0): Conv1d(64, 128, kernel_size=(1,), stride=(1,)) + (1): Conv1d(64, 128, kernel_size=(1,), stride=(1,)) + (2): Conv1d(64, 128, kernel_size=(1,), stride=(1,)) + (3): Conv1d(64, 64, kernel_size=(1,), stride=(1,)) + ) + (drop): Dropout(p=0, inplace=False) + (cond_layer): Conv1d(192, 512, kernel_size=(1,), stride=(1,)) + ) + (post): Conv1d(64, 8, kernel_size=(1,), stride=(1,)) + ) + (1): FlipLayer() + (2): CouplingLayer( + (pre): Conv1d(8, 64, kernel_size=(1,), stride=(1,)) + (enc): WN( + (in_layers): ModuleList( + (0): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + (1): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + (2): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + (3): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + ) + (res_skip_layers): ModuleList( + (0): Conv1d(64, 128, kernel_size=(1,), stride=(1,)) + (1): Conv1d(64, 128, kernel_size=(1,), stride=(1,)) + (2): Conv1d(64, 128, kernel_size=(1,), stride=(1,)) + (3): Conv1d(64, 64, kernel_size=(1,), stride=(1,)) + ) + (drop): Dropout(p=0, inplace=False) + (cond_layer): Conv1d(192, 512, kernel_size=(1,), stride=(1,)) + ) + (post): Conv1d(64, 8, kernel_size=(1,), stride=(1,)) + ) + (3): FlipLayer() + (4): CouplingLayer( + (pre): Conv1d(8, 64, kernel_size=(1,), stride=(1,)) + (enc): WN( + (in_layers): ModuleList( + (0): Conv1d(64, 128, kernel_size=(3,), stride=(1,), 
padding=(1,)) + (1): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + (2): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + (3): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + ) + (res_skip_layers): ModuleList( + (0): Conv1d(64, 128, kernel_size=(1,), stride=(1,)) + (1): Conv1d(64, 128, kernel_size=(1,), stride=(1,)) + (2): Conv1d(64, 128, kernel_size=(1,), stride=(1,)) + (3): Conv1d(64, 64, kernel_size=(1,), stride=(1,)) + ) + (drop): Dropout(p=0, inplace=False) + (cond_layer): Conv1d(192, 512, kernel_size=(1,), stride=(1,)) + ) + (post): Conv1d(64, 8, kernel_size=(1,), stride=(1,)) + ) + (5): FlipLayer() + (6): CouplingLayer( + (pre): Conv1d(8, 64, kernel_size=(1,), stride=(1,)) + (enc): WN( + (in_layers): ModuleList( + (0): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + (1): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + (2): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + (3): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + ) + (res_skip_layers): ModuleList( + (0): Conv1d(64, 128, kernel_size=(1,), stride=(1,)) + (1): Conv1d(64, 128, kernel_size=(1,), stride=(1,)) + (2): Conv1d(64, 128, kernel_size=(1,), stride=(1,)) + (3): Conv1d(64, 64, kernel_size=(1,), stride=(1,)) + ) + (drop): Dropout(p=0, inplace=False) + (cond_layer): Conv1d(192, 512, kernel_size=(1,), stride=(1,)) + ) + (post): Conv1d(64, 8, kernel_size=(1,), stride=(1,)) + ) + (7): FlipLayer() + ) + ) + (decoder): FVAEDecoder( + (pre_net): Sequential( + (0): ConvTranspose1d(16, 192, kernel_size=(4,), stride=(4,)) + ) + (nn): ConditionalConvBlocks( + (res_blocks): Sequential( + (0): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + (1): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + (2): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + (3): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), 
stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + ) + (last_norm): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (post_net1): Conv1d(192, 192, kernel_size=(3,), stride=(1,), padding=(1,)) + (g_prenet): Conv1d(192, 192, kernel_size=(3,), stride=(1,), padding=(1,)) + ) + (out_proj): Conv1d(192, 80, kernel_size=(1,), stride=(1,)) + ) + ) + (word_pos_proj): Linear(in_features=192, out_features=192, bias=True) +) +| model Trainable Parameters: 23.020M +| dur_predictor Trainable Parameters: 0.555M +| length_regulator Trainable Parameters: 0.000M +| ph_encoder Trainable Parameters: 4.753M +| ph2word_encoder Trainable Parameters: 2.081M +| sin_pos Trainable Parameters: 0.000M +| enc_pos_proj Trainable Parameters: 0.074M +| dec_query_proj Trainable Parameters: 0.074M +| dec_res_proj Trainable Parameters: 0.074M +| attn Trainable Parameters: 0.147M +| text_encoder_postnet Trainable Parameters: 2.771M +| fvae Trainable Parameters: 12.453M +| word_pos_proj Trainable Parameters: 0.037M +| fvae.g_pre_net Trainable Parameters: 0.295M +| fvae.encoder Trainable Parameters: 7.444M +| fvae.prior_flow Trainable Parameters: 0.917M +| fvae.decoder Trainable Parameters: 3.796M diff --git a/text_to_speech/checkpoints/ljspeech/ps_adv_baseline/terminal_logs/log_20221206172630.txt b/text_to_speech/checkpoints/ljspeech/ps_adv_baseline/terminal_logs/log_20221206172630.txt new file mode 100644 index 0000000000000000000000000000000000000000..b37895d3404b990b1eb13d678c99e580874b605f --- /dev/null +++ b/text_to_speech/checkpoints/ljspeech/ps_adv_baseline/terminal_logs/log_20221206172630.txt @@ -0,0 +1,625 @@ +| Copied codes to checkpoints/1206_lj/ps_adv_baseline/codes/20221206172630. 
+| model Arch: PortaSpeech( + (dur_predictor): DurationPredictor( + (conv): ModuleList( + (0): Sequential( + (0): Conv1d(192, 192, kernel_size=(5,), stride=(1,), padding=(2,)) + (1): ReLU() + (2): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (3): Dropout(p=0.2, inplace=False) + ) + (1): Sequential( + (0): Conv1d(192, 192, kernel_size=(5,), stride=(1,), padding=(2,)) + (1): ReLU() + (2): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (3): Dropout(p=0.2, inplace=False) + ) + (2): Sequential( + (0): Conv1d(192, 192, kernel_size=(5,), stride=(1,), padding=(2,)) + (1): ReLU() + (2): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (3): Dropout(p=0.2, inplace=False) + ) + ) + (linear): Sequential( + (0): Linear(in_features=192, out_features=1, bias=True) + (1): Softplus(beta=1, threshold=20) + ) + ) + (length_regulator): LengthRegulator() + (ph_encoder): RelTransformerEncoder( + (emb): Embedding(79, 192, padding_idx=0) + (pre): ConvReluNorm( + (conv_layers): ModuleList( + (0): Conv1d(192, 192, kernel_size=(5,), stride=(1,), padding=(2,)) + (1): Conv1d(192, 192, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): Conv1d(192, 192, kernel_size=(5,), stride=(1,), padding=(2,)) + ) + (norm_layers): ModuleList( + (0): LayerNorm() + (1): LayerNorm() + (2): LayerNorm() + ) + (relu_drop): Sequential( + (0): ReLU() + (1): Dropout(p=0, inplace=False) + ) + (proj): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + ) + (encoder): Encoder( + (drop): Dropout(p=0.0, inplace=False) + (attn_layers): ModuleList( + (0): MultiHeadAttention( + (conv_q): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_k): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_v): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_o): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + (1): MultiHeadAttention( + (conv_q): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_k): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_v): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_o): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + (2): MultiHeadAttention( + (conv_q): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_k): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_v): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_o): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + (3): MultiHeadAttention( + (conv_q): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_k): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_v): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_o): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + ) + (norm_layers_1): ModuleList( + (0): LayerNorm() + (1): LayerNorm() + (2): LayerNorm() + (3): LayerNorm() + ) + (ffn_layers): ModuleList( + (0): FFN( + (conv_1): Conv1d(192, 768, kernel_size=(5,), stride=(1,), padding=(2,)) + (conv_2): Conv1d(768, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + (1): FFN( + (conv_1): Conv1d(192, 768, kernel_size=(5,), stride=(1,), padding=(2,)) + (conv_2): Conv1d(768, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + (2): FFN( + (conv_1): Conv1d(192, 768, kernel_size=(5,), stride=(1,), padding=(2,)) + (conv_2): Conv1d(768, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + (3): FFN( + (conv_1): Conv1d(192, 768, 
kernel_size=(5,), stride=(1,), padding=(2,)) + (conv_2): Conv1d(768, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + ) + (norm_layers_2): ModuleList( + (0): LayerNorm() + (1): LayerNorm() + (2): LayerNorm() + (3): LayerNorm() + ) + (last_ln): LayerNorm() + ) + ) + (ph2word_encoder): RelTransformerEncoder( + (pre): ConvReluNorm( + (conv_layers): ModuleList( + (0): Conv1d(192, 192, kernel_size=(5,), stride=(1,), padding=(2,)) + (1): Conv1d(192, 192, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): Conv1d(192, 192, kernel_size=(5,), stride=(1,), padding=(2,)) + ) + (norm_layers): ModuleList( + (0): LayerNorm() + (1): LayerNorm() + (2): LayerNorm() + ) + (relu_drop): Sequential( + (0): ReLU() + (1): Dropout(p=0, inplace=False) + ) + (proj): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + ) + (encoder): Encoder( + (drop): Dropout(p=0.0, inplace=False) + (attn_layers): ModuleList( + (0): MultiHeadAttention( + (conv_q): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_k): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_v): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_o): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + (1): MultiHeadAttention( + (conv_q): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_k): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_v): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_o): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + (2): MultiHeadAttention( + (conv_q): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_k): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_v): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_o): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + (3): MultiHeadAttention( + (conv_q): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_k): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_v): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_o): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + ) + (norm_layers_1): ModuleList( + (0): LayerNorm() + (1): LayerNorm() + (2): LayerNorm() + (3): LayerNorm() + ) + (ffn_layers): ModuleList( + (0): FFN( + (conv_1): Conv1d(192, 192, kernel_size=(5,), stride=(1,), padding=(2,)) + (conv_2): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + (1): FFN( + (conv_1): Conv1d(192, 192, kernel_size=(5,), stride=(1,), padding=(2,)) + (conv_2): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + (2): FFN( + (conv_1): Conv1d(192, 192, kernel_size=(5,), stride=(1,), padding=(2,)) + (conv_2): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + (3): FFN( + (conv_1): Conv1d(192, 192, kernel_size=(5,), stride=(1,), padding=(2,)) + (conv_2): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + ) + (norm_layers_2): ModuleList( + (0): LayerNorm() + (1): LayerNorm() + (2): LayerNorm() + (3): LayerNorm() + ) + (last_ln): LayerNorm() + ) + ) + (sin_pos): SinusoidalPosEmb() + (enc_pos_proj): Linear(in_features=384, out_features=192, bias=True) + (dec_query_proj): Linear(in_features=384, out_features=192, bias=True) + (dec_res_proj): Linear(in_features=384, out_features=192, bias=True) + (attn): MultiheadAttention( + (out_proj): Linear(in_features=192, out_features=192, 
bias=False) + ) + (text_encoder_postnet): ConvBlocks( + (res_blocks): Sequential( + (0): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + (1): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + (2): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + ) + (last_norm): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (post_net1): Conv1d(192, 192, kernel_size=(3,), stride=(1,), padding=(1,)) + ) + (fvae): FVAE( + (g_pre_net): Sequential( + (0): Conv1d(192, 192, kernel_size=(8,), stride=(4,), padding=(2,)) + ) + (encoder): FVAEEncoder( + (pre_net): Sequential( + (0): Conv1d(80, 192, kernel_size=(8,), stride=(4,), padding=(2,)) + ) + (nn): ConditionalConvBlocks( + (res_blocks): Sequential( + (0): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + (1): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + (2): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, 
kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + (3): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + (4): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + (5): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + (6): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + (7): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + ) + (last_norm): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (post_net1): Conv1d(192, 192, kernel_size=(3,), stride=(1,), padding=(1,)) + (g_prenet): Conv1d(192, 192, kernel_size=(3,), stride=(1,), padding=(1,)) + ) + (out_proj): Conv1d(192, 32, 
kernel_size=(1,), stride=(1,)) + ) + (prior_flow): ResFlow( + (flows): ModuleList( + (0): CouplingLayer( + (pre): Conv1d(8, 64, kernel_size=(1,), stride=(1,)) + (enc): WN( + (in_layers): ModuleList( + (0): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + (1): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + (2): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + (3): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + ) + (res_skip_layers): ModuleList( + (0): Conv1d(64, 128, kernel_size=(1,), stride=(1,)) + (1): Conv1d(64, 128, kernel_size=(1,), stride=(1,)) + (2): Conv1d(64, 128, kernel_size=(1,), stride=(1,)) + (3): Conv1d(64, 64, kernel_size=(1,), stride=(1,)) + ) + (drop): Dropout(p=0, inplace=False) + (cond_layer): Conv1d(192, 512, kernel_size=(1,), stride=(1,)) + ) + (post): Conv1d(64, 8, kernel_size=(1,), stride=(1,)) + ) + (1): FlipLayer() + (2): CouplingLayer( + (pre): Conv1d(8, 64, kernel_size=(1,), stride=(1,)) + (enc): WN( + (in_layers): ModuleList( + (0): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + (1): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + (2): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + (3): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + ) + (res_skip_layers): ModuleList( + (0): Conv1d(64, 128, kernel_size=(1,), stride=(1,)) + (1): Conv1d(64, 128, kernel_size=(1,), stride=(1,)) + (2): Conv1d(64, 128, kernel_size=(1,), stride=(1,)) + (3): Conv1d(64, 64, kernel_size=(1,), stride=(1,)) + ) + (drop): Dropout(p=0, inplace=False) + (cond_layer): Conv1d(192, 512, kernel_size=(1,), stride=(1,)) + ) + (post): Conv1d(64, 8, kernel_size=(1,), stride=(1,)) + ) + (3): FlipLayer() + (4): CouplingLayer( + (pre): Conv1d(8, 64, kernel_size=(1,), stride=(1,)) + (enc): WN( + (in_layers): ModuleList( + (0): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + (1): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + (2): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + (3): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + ) + (res_skip_layers): ModuleList( + (0): Conv1d(64, 128, kernel_size=(1,), stride=(1,)) + (1): Conv1d(64, 128, kernel_size=(1,), stride=(1,)) + (2): Conv1d(64, 128, kernel_size=(1,), stride=(1,)) + (3): Conv1d(64, 64, kernel_size=(1,), stride=(1,)) + ) + (drop): Dropout(p=0, inplace=False) + (cond_layer): Conv1d(192, 512, kernel_size=(1,), stride=(1,)) + ) + (post): Conv1d(64, 8, kernel_size=(1,), stride=(1,)) + ) + (5): FlipLayer() + (6): CouplingLayer( + (pre): Conv1d(8, 64, kernel_size=(1,), stride=(1,)) + (enc): WN( + (in_layers): ModuleList( + (0): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + (1): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + (2): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + (3): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + ) + (res_skip_layers): ModuleList( + (0): Conv1d(64, 128, kernel_size=(1,), stride=(1,)) + (1): Conv1d(64, 128, kernel_size=(1,), stride=(1,)) + (2): Conv1d(64, 128, kernel_size=(1,), stride=(1,)) + (3): Conv1d(64, 64, kernel_size=(1,), stride=(1,)) + ) + (drop): Dropout(p=0, inplace=False) + (cond_layer): Conv1d(192, 512, kernel_size=(1,), stride=(1,)) + ) + (post): Conv1d(64, 8, kernel_size=(1,), stride=(1,)) + ) + (7): FlipLayer() + ) + ) + (decoder): FVAEDecoder( + (pre_net): Sequential( + (0): ConvTranspose1d(16, 192, kernel_size=(4,), stride=(4,)) + ) + (nn): ConditionalConvBlocks( + 
(res_blocks): Sequential( + (0): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + (1): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + (2): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + (3): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + ) + (last_norm): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (post_net1): Conv1d(192, 192, kernel_size=(3,), stride=(1,), padding=(1,)) + (g_prenet): Conv1d(192, 192, kernel_size=(3,), stride=(1,), padding=(1,)) + ) + (out_proj): Conv1d(192, 80, kernel_size=(1,), stride=(1,)) + ) + ) + (word_pos_proj): Linear(in_features=192, out_features=192, bias=True) +) +| model Trainable Parameters: 23.020M +| dur_predictor Trainable Parameters: 0.555M +| length_regulator Trainable Parameters: 0.000M +| ph_encoder Trainable Parameters: 4.753M +| ph2word_encoder Trainable Parameters: 2.081M +| sin_pos Trainable Parameters: 0.000M +| enc_pos_proj Trainable Parameters: 0.074M +| dec_query_proj Trainable Parameters: 0.074M +| dec_res_proj Trainable Parameters: 0.074M +| attn Trainable Parameters: 0.147M +| text_encoder_postnet Trainable Parameters: 2.771M +| fvae Trainable Parameters: 12.453M +| word_pos_proj Trainable Parameters: 0.037M +| fvae.g_pre_net Trainable Parameters: 0.295M +| fvae.encoder Trainable Parameters: 7.444M +| fvae.prior_flow Trainable Parameters: 0.917M +| fvae.decoder Trainable Parameters: 3.796M diff --git a/text_to_speech/checkpoints/ljspeech/ps_adv_baseline/terminal_logs/log_20221206230829.txt 
b/text_to_speech/checkpoints/ljspeech/ps_adv_baseline/terminal_logs/log_20221206230829.txt new file mode 100644 index 0000000000000000000000000000000000000000..aecaf0199c444900e5e2f72531540643c3bb8558 --- /dev/null +++ b/text_to_speech/checkpoints/ljspeech/ps_adv_baseline/terminal_logs/log_20221206230829.txt @@ -0,0 +1,636 @@ +| Copied codes to checkpoints/1206_lj/ps_adv_baseline/codes/20221206230829. +| model Arch: PortaSpeech( + (dur_predictor): DurationPredictor( + (conv): ModuleList( + (0): Sequential( + (0): Conv1d(192, 192, kernel_size=(5,), stride=(1,), padding=(2,)) + (1): ReLU() + (2): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (3): Dropout(p=0.2, inplace=False) + ) + (1): Sequential( + (0): Conv1d(192, 192, kernel_size=(5,), stride=(1,), padding=(2,)) + (1): ReLU() + (2): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (3): Dropout(p=0.2, inplace=False) + ) + (2): Sequential( + (0): Conv1d(192, 192, kernel_size=(5,), stride=(1,), padding=(2,)) + (1): ReLU() + (2): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (3): Dropout(p=0.2, inplace=False) + ) + ) + (linear): Sequential( + (0): Linear(in_features=192, out_features=1, bias=True) + (1): Softplus(beta=1, threshold=20) + ) + ) + (length_regulator): LengthRegulator() + (ph_encoder): RelTransformerEncoder( + (emb): Embedding(79, 192, padding_idx=0) + (pre): ConvReluNorm( + (conv_layers): ModuleList( + (0): Conv1d(192, 192, kernel_size=(5,), stride=(1,), padding=(2,)) + (1): Conv1d(192, 192, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): Conv1d(192, 192, kernel_size=(5,), stride=(1,), padding=(2,)) + ) + (norm_layers): ModuleList( + (0): LayerNorm() + (1): LayerNorm() + (2): LayerNorm() + ) + (relu_drop): Sequential( + (0): ReLU() + (1): Dropout(p=0, inplace=False) + ) + (proj): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + ) + (encoder): Encoder( + (drop): Dropout(p=0.0, inplace=False) + (attn_layers): ModuleList( + (0): MultiHeadAttention( + (conv_q): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_k): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_v): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_o): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + (1): MultiHeadAttention( + (conv_q): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_k): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_v): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_o): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + (2): MultiHeadAttention( + (conv_q): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_k): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_v): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_o): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + (3): MultiHeadAttention( + (conv_q): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_k): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_v): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_o): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + ) + (norm_layers_1): ModuleList( + (0): LayerNorm() + (1): LayerNorm() + (2): LayerNorm() + (3): LayerNorm() + ) + (ffn_layers): ModuleList( + (0): FFN( + (conv_1): Conv1d(192, 768, kernel_size=(5,), stride=(1,), padding=(2,)) + (conv_2): Conv1d(768, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + (1): FFN( + (conv_1): 
Conv1d(192, 768, kernel_size=(5,), stride=(1,), padding=(2,)) + (conv_2): Conv1d(768, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + (2): FFN( + (conv_1): Conv1d(192, 768, kernel_size=(5,), stride=(1,), padding=(2,)) + (conv_2): Conv1d(768, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + (3): FFN( + (conv_1): Conv1d(192, 768, kernel_size=(5,), stride=(1,), padding=(2,)) + (conv_2): Conv1d(768, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + ) + (norm_layers_2): ModuleList( + (0): LayerNorm() + (1): LayerNorm() + (2): LayerNorm() + (3): LayerNorm() + ) + (last_ln): LayerNorm() + ) + ) + (ph2word_encoder): RelTransformerEncoder( + (pre): ConvReluNorm( + (conv_layers): ModuleList( + (0): Conv1d(192, 192, kernel_size=(5,), stride=(1,), padding=(2,)) + (1): Conv1d(192, 192, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): Conv1d(192, 192, kernel_size=(5,), stride=(1,), padding=(2,)) + ) + (norm_layers): ModuleList( + (0): LayerNorm() + (1): LayerNorm() + (2): LayerNorm() + ) + (relu_drop): Sequential( + (0): ReLU() + (1): Dropout(p=0, inplace=False) + ) + (proj): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + ) + (encoder): Encoder( + (drop): Dropout(p=0.0, inplace=False) + (attn_layers): ModuleList( + (0): MultiHeadAttention( + (conv_q): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_k): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_v): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_o): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + (1): MultiHeadAttention( + (conv_q): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_k): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_v): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_o): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + (2): MultiHeadAttention( + (conv_q): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_k): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_v): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_o): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + (3): MultiHeadAttention( + (conv_q): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_k): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_v): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_o): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + ) + (norm_layers_1): ModuleList( + (0): LayerNorm() + (1): LayerNorm() + (2): LayerNorm() + (3): LayerNorm() + ) + (ffn_layers): ModuleList( + (0): FFN( + (conv_1): Conv1d(192, 192, kernel_size=(5,), stride=(1,), padding=(2,)) + (conv_2): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + (1): FFN( + (conv_1): Conv1d(192, 192, kernel_size=(5,), stride=(1,), padding=(2,)) + (conv_2): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + (2): FFN( + (conv_1): Conv1d(192, 192, kernel_size=(5,), stride=(1,), padding=(2,)) + (conv_2): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + (3): FFN( + (conv_1): Conv1d(192, 192, kernel_size=(5,), stride=(1,), padding=(2,)) + (conv_2): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + ) + (norm_layers_2): ModuleList( + (0): LayerNorm() + (1): LayerNorm() + (2): 
LayerNorm() + (3): LayerNorm() + ) + (last_ln): LayerNorm() + ) + ) + (sin_pos): SinusoidalPosEmb() + (enc_pos_proj): Linear(in_features=384, out_features=192, bias=True) + (dec_query_proj): Linear(in_features=384, out_features=192, bias=True) + (dec_res_proj): Linear(in_features=384, out_features=192, bias=True) + (attn): MultiheadAttention( + (out_proj): Linear(in_features=192, out_features=192, bias=False) + ) + (text_encoder_postnet): ConvBlocks( + (res_blocks): Sequential( + (0): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + (1): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + (2): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + ) + (last_norm): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (post_net1): Conv1d(192, 192, kernel_size=(3,), stride=(1,), padding=(1,)) + ) + (fvae): FVAE( + (g_pre_net): Sequential( + (0): Conv1d(192, 192, kernel_size=(8,), stride=(4,), padding=(2,)) + ) + (encoder): FVAEEncoder( + (pre_net): Sequential( + (0): Conv1d(80, 192, kernel_size=(8,), stride=(4,), padding=(2,)) + ) + (nn): ConditionalConvBlocks( + (res_blocks): Sequential( + (0): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + (1): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): 
LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + (2): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + (3): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + (4): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + (5): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + (6): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + (7): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) 
+ (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + ) + (last_norm): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (post_net1): Conv1d(192, 192, kernel_size=(3,), stride=(1,), padding=(1,)) + (g_prenet): Conv1d(192, 192, kernel_size=(3,), stride=(1,), padding=(1,)) + ) + (out_proj): Conv1d(192, 32, kernel_size=(1,), stride=(1,)) + ) + (prior_flow): ResFlow( + (flows): ModuleList( + (0): CouplingLayer( + (pre): Conv1d(8, 64, kernel_size=(1,), stride=(1,)) + (enc): WN( + (in_layers): ModuleList( + (0): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + (1): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + (2): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + (3): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + ) + (res_skip_layers): ModuleList( + (0): Conv1d(64, 128, kernel_size=(1,), stride=(1,)) + (1): Conv1d(64, 128, kernel_size=(1,), stride=(1,)) + (2): Conv1d(64, 128, kernel_size=(1,), stride=(1,)) + (3): Conv1d(64, 64, kernel_size=(1,), stride=(1,)) + ) + (drop): Dropout(p=0, inplace=False) + (cond_layer): Conv1d(192, 512, kernel_size=(1,), stride=(1,)) + ) + (post): Conv1d(64, 8, kernel_size=(1,), stride=(1,)) + ) + (1): FlipLayer() + (2): CouplingLayer( + (pre): Conv1d(8, 64, kernel_size=(1,), stride=(1,)) + (enc): WN( + (in_layers): ModuleList( + (0): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + (1): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + (2): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + (3): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + ) + (res_skip_layers): ModuleList( + (0): Conv1d(64, 128, kernel_size=(1,), stride=(1,)) + (1): Conv1d(64, 128, kernel_size=(1,), stride=(1,)) + (2): Conv1d(64, 128, kernel_size=(1,), stride=(1,)) + (3): Conv1d(64, 64, kernel_size=(1,), stride=(1,)) + ) + (drop): Dropout(p=0, inplace=False) + (cond_layer): Conv1d(192, 512, kernel_size=(1,), stride=(1,)) + ) + (post): Conv1d(64, 8, kernel_size=(1,), stride=(1,)) + ) + (3): FlipLayer() + (4): CouplingLayer( + (pre): Conv1d(8, 64, kernel_size=(1,), stride=(1,)) + (enc): WN( + (in_layers): ModuleList( + (0): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + (1): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + (2): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + (3): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + ) + (res_skip_layers): ModuleList( + (0): Conv1d(64, 128, kernel_size=(1,), stride=(1,)) + (1): Conv1d(64, 128, kernel_size=(1,), stride=(1,)) + (2): Conv1d(64, 128, kernel_size=(1,), stride=(1,)) + (3): Conv1d(64, 64, kernel_size=(1,), stride=(1,)) + ) + (drop): Dropout(p=0, inplace=False) + (cond_layer): Conv1d(192, 512, kernel_size=(1,), stride=(1,)) + ) + (post): Conv1d(64, 8, kernel_size=(1,), stride=(1,)) + ) + (5): FlipLayer() + (6): CouplingLayer( + (pre): Conv1d(8, 64, kernel_size=(1,), stride=(1,)) + (enc): WN( + (in_layers): ModuleList( + (0): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + (1): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + (2): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + (3): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + ) + (res_skip_layers): ModuleList( + (0): Conv1d(64, 128, kernel_size=(1,), stride=(1,)) + (1): Conv1d(64, 128, kernel_size=(1,), stride=(1,)) + (2): Conv1d(64, 128, kernel_size=(1,), stride=(1,)) + (3): Conv1d(64, 64, 
kernel_size=(1,), stride=(1,)) + ) + (drop): Dropout(p=0, inplace=False) + (cond_layer): Conv1d(192, 512, kernel_size=(1,), stride=(1,)) + ) + (post): Conv1d(64, 8, kernel_size=(1,), stride=(1,)) + ) + (7): FlipLayer() + ) + ) + (decoder): FVAEDecoder( + (pre_net): Sequential( + (0): ConvTranspose1d(16, 192, kernel_size=(4,), stride=(4,)) + ) + (nn): ConditionalConvBlocks( + (res_blocks): Sequential( + (0): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + (1): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + (2): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + (3): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + ) + (last_norm): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (post_net1): Conv1d(192, 192, kernel_size=(3,), stride=(1,), padding=(1,)) + (g_prenet): Conv1d(192, 192, kernel_size=(3,), stride=(1,), padding=(1,)) + ) + (out_proj): Conv1d(192, 80, kernel_size=(1,), stride=(1,)) + ) + ) + (word_pos_proj): Linear(in_features=192, out_features=192, bias=True) +) +| model Trainable Parameters: 23.020M +| dur_predictor Trainable Parameters: 0.555M +| length_regulator Trainable Parameters: 0.000M +| ph_encoder Trainable Parameters: 4.753M +| ph2word_encoder Trainable Parameters: 2.081M +| sin_pos Trainable Parameters: 0.000M +| enc_pos_proj Trainable Parameters: 0.074M +| dec_query_proj Trainable Parameters: 0.074M +| dec_res_proj Trainable Parameters: 0.074M +| attn Trainable Parameters: 0.147M +| text_encoder_postnet Trainable Parameters: 
2.771M +| fvae Trainable Parameters: 12.453M +| word_pos_proj Trainable Parameters: 0.037M +| fvae.g_pre_net Trainable Parameters: 0.295M +| fvae.encoder Trainable Parameters: 7.444M +| fvae.prior_flow Trainable Parameters: 0.917M +| fvae.decoder Trainable Parameters: 3.796M +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. +| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. +| Validation results@2000: {'total_loss': 59.562, 'kl_v': 0.2045, 'kl': 0.0409, 'l1': 0.1754, 'ssim': 0.2442, 'wdur': 0.2047, 'abs_word_dur_error': 56.1648, 'abs_sent_dur_error': 0.6264, 'fr': 0.5262, 'pcr': 0.4182, 'dfr': 0.9566, 'pe_perceptual_layer_0': 0.4151, 'pe_perceptual_layer_1': 1.1653, 'pe_perceptual_layer_2': 1.3079, 'pe_perceptual_layer_3': 0.2109, 'pe_perceptual_layer_4': 0.3054, 'pe_perceptual_layer_5': 1.3284, 'pe_perceptual_layer_6': 1.4444, 'pe_perceptual_layer_7': 1.304, 'pe_perceptual_layer_8': 0.9702, 'pe_perceptual_layer_9': 0.1613} +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. +| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. +| Validation results@4000: {'total_loss': 53.7021, 'kl_v': 0.1406, 'kl': 0.0562, 'l1': 0.1469, 'ssim': 0.1944, 'wdur': 0.1837, 'abs_word_dur_error': 50.0172, 'abs_sent_dur_error': 0.4633, 'fr': 0.7588, 'pcr': 0.7681, 'dfr': 0.9729, 'pe_perceptual_layer_0': 0.1982, 'pe_perceptual_layer_1': 0.5526, 'pe_perceptual_layer_2': 0.6748, 'pe_perceptual_layer_3': 0.1113, 'pe_perceptual_layer_4': 0.1336, 'pe_perceptual_layer_5': 0.7798, 'pe_perceptual_layer_6': 0.8596, 'pe_perceptual_layer_7': 0.6976, 'pe_perceptual_layer_8': 0.5093, 'pe_perceptual_layer_9': 0.1068} +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. +| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. +| Validation results@6000: {'total_loss': 54.5089, 'kl_v': 0.1084, 'kl': 0.065, 'l1': 0.1452, 'ssim': 0.1852, 'wdur': 0.1834, 'abs_word_dur_error': 50.629, 'abs_sent_dur_error': 0.6202, 'fr': 0.7829, 'pcr': 0.8159, 'dfr': 0.9736, 'pe_perceptual_layer_0': 0.182, 'pe_perceptual_layer_1': 0.5054, 'pe_perceptual_layer_2': 0.6256, 'pe_perceptual_layer_3': 0.105, 'pe_perceptual_layer_4': 0.1249, 'pe_perceptual_layer_5': 0.7368, 'pe_perceptual_layer_6': 0.8138, 'pe_perceptual_layer_7': 0.6599, 'pe_perceptual_layer_8': 0.481, 'pe_perceptual_layer_9': 0.1047} +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. +| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. diff --git a/text_to_speech/checkpoints/ljspeech/ps_adv_baseline/terminal_logs/log_20221207090222.txt b/text_to_speech/checkpoints/ljspeech/ps_adv_baseline/terminal_logs/log_20221207090222.txt new file mode 100644 index 0000000000000000000000000000000000000000..8eb23dc166c5404d4ce6b15c5682fe39cc2a9cb9 --- /dev/null +++ b/text_to_speech/checkpoints/ljspeech/ps_adv_baseline/terminal_logs/log_20221207090222.txt @@ -0,0 +1,679 @@ +| Copied codes to checkpoints/1206_lj/ps_adv_baseline/codes/20221207090222. 
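Note: the prior_flow printed in the architecture dump above is a stack of CouplingLayer / FlipLayer pairs over the 16-channel FVAE latent. Each coupling layer leaves one 8-channel half untouched, predicts an affine transform for the other half from that half plus the 192-channel text condition (the cond_layer input), and the flip swaps the halves so the next layer transforms the other side. The snippet below is a minimal sketch of that pattern, assuming a plain convolutional conditioning net in place of the WN module shown in the log; it is illustrative only, not the repository's implementation.

import torch
import torch.nn as nn

class AffineCoupling(nn.Module):
    """Transform one 8-channel half of the latent, conditioned on the other half and the condition g."""
    def __init__(self, channels=16, hidden=64, cond_channels=192):
        super().__init__()
        half = channels // 2
        # stand-in for the printed pre -> WN -> post stack (Conv1d(8, 64) ... Conv1d(64, 8))
        self.net = nn.Sequential(
            nn.Conv1d(half + cond_channels, hidden, 3, padding=1),
            nn.ReLU(),
            nn.Conv1d(hidden, half * 2, 1),   # per-channel shift and log-scale for the other half
        )

    def forward(self, x, g):                  # x: [B, 16, T] latent, g: [B, 192, T] condition
        xa, xb = x.chunk(2, dim=1)
        shift, log_scale = self.net(torch.cat([xa, g], dim=1)).chunk(2, dim=1)
        z = torch.cat([xa, xb * log_scale.exp() + shift], dim=1)
        return z, log_scale.sum(dim=(1, 2))   # transformed latent, log|det J|

    def inverse(self, z, g):
        za, zb = z.chunk(2, dim=1)
        shift, log_scale = self.net(torch.cat([za, g], dim=1)).chunk(2, dim=1)
        return torch.cat([za, (zb - shift) * (-log_scale).exp()], dim=1)

class Flip(nn.Module):
    """Swap the two halves so the next coupling layer transforms the other side."""
    def forward(self, x):
        xa, xb = x.chunk(2, dim=1)
        return torch.cat([xb, xa], dim=1)

# four coupling layers interleaved with flips, mirroring the printed (0)-(7) ModuleList
flows = nn.ModuleList([m for _ in range(4) for m in (AffineCoupling(), Flip())])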
+| model Arch: PortaSpeech( + (dur_predictor): DurationPredictor( + (conv): ModuleList( + (0): Sequential( + (0): Conv1d(192, 192, kernel_size=(5,), stride=(1,), padding=(2,)) + (1): ReLU() + (2): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (3): Dropout(p=0.2, inplace=False) + ) + (1): Sequential( + (0): Conv1d(192, 192, kernel_size=(5,), stride=(1,), padding=(2,)) + (1): ReLU() + (2): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (3): Dropout(p=0.2, inplace=False) + ) + (2): Sequential( + (0): Conv1d(192, 192, kernel_size=(5,), stride=(1,), padding=(2,)) + (1): ReLU() + (2): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (3): Dropout(p=0.2, inplace=False) + ) + ) + (linear): Sequential( + (0): Linear(in_features=192, out_features=1, bias=True) + (1): Softplus(beta=1, threshold=20) + ) + ) + (length_regulator): LengthRegulator() + (ph_encoder): RelTransformerEncoder( + (emb): Embedding(79, 192, padding_idx=0) + (pre): ConvReluNorm( + (conv_layers): ModuleList( + (0): Conv1d(192, 192, kernel_size=(5,), stride=(1,), padding=(2,)) + (1): Conv1d(192, 192, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): Conv1d(192, 192, kernel_size=(5,), stride=(1,), padding=(2,)) + ) + (norm_layers): ModuleList( + (0): LayerNorm() + (1): LayerNorm() + (2): LayerNorm() + ) + (relu_drop): Sequential( + (0): ReLU() + (1): Dropout(p=0, inplace=False) + ) + (proj): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + ) + (encoder): Encoder( + (drop): Dropout(p=0.0, inplace=False) + (attn_layers): ModuleList( + (0): MultiHeadAttention( + (conv_q): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_k): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_v): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_o): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + (1): MultiHeadAttention( + (conv_q): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_k): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_v): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_o): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + (2): MultiHeadAttention( + (conv_q): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_k): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_v): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_o): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + (3): MultiHeadAttention( + (conv_q): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_k): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_v): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_o): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + ) + (norm_layers_1): ModuleList( + (0): LayerNorm() + (1): LayerNorm() + (2): LayerNorm() + (3): LayerNorm() + ) + (ffn_layers): ModuleList( + (0): FFN( + (conv_1): Conv1d(192, 768, kernel_size=(5,), stride=(1,), padding=(2,)) + (conv_2): Conv1d(768, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + (1): FFN( + (conv_1): Conv1d(192, 768, kernel_size=(5,), stride=(1,), padding=(2,)) + (conv_2): Conv1d(768, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + (2): FFN( + (conv_1): Conv1d(192, 768, kernel_size=(5,), stride=(1,), padding=(2,)) + (conv_2): Conv1d(768, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + (3): FFN( + (conv_1): Conv1d(192, 768, 
kernel_size=(5,), stride=(1,), padding=(2,)) + (conv_2): Conv1d(768, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + ) + (norm_layers_2): ModuleList( + (0): LayerNorm() + (1): LayerNorm() + (2): LayerNorm() + (3): LayerNorm() + ) + (last_ln): LayerNorm() + ) + ) + (ph2word_encoder): RelTransformerEncoder( + (pre): ConvReluNorm( + (conv_layers): ModuleList( + (0): Conv1d(192, 192, kernel_size=(5,), stride=(1,), padding=(2,)) + (1): Conv1d(192, 192, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): Conv1d(192, 192, kernel_size=(5,), stride=(1,), padding=(2,)) + ) + (norm_layers): ModuleList( + (0): LayerNorm() + (1): LayerNorm() + (2): LayerNorm() + ) + (relu_drop): Sequential( + (0): ReLU() + (1): Dropout(p=0, inplace=False) + ) + (proj): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + ) + (encoder): Encoder( + (drop): Dropout(p=0.0, inplace=False) + (attn_layers): ModuleList( + (0): MultiHeadAttention( + (conv_q): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_k): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_v): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_o): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + (1): MultiHeadAttention( + (conv_q): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_k): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_v): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_o): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + (2): MultiHeadAttention( + (conv_q): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_k): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_v): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_o): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + (3): MultiHeadAttention( + (conv_q): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_k): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_v): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_o): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + ) + (norm_layers_1): ModuleList( + (0): LayerNorm() + (1): LayerNorm() + (2): LayerNorm() + (3): LayerNorm() + ) + (ffn_layers): ModuleList( + (0): FFN( + (conv_1): Conv1d(192, 192, kernel_size=(5,), stride=(1,), padding=(2,)) + (conv_2): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + (1): FFN( + (conv_1): Conv1d(192, 192, kernel_size=(5,), stride=(1,), padding=(2,)) + (conv_2): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + (2): FFN( + (conv_1): Conv1d(192, 192, kernel_size=(5,), stride=(1,), padding=(2,)) + (conv_2): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + (3): FFN( + (conv_1): Conv1d(192, 192, kernel_size=(5,), stride=(1,), padding=(2,)) + (conv_2): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + ) + (norm_layers_2): ModuleList( + (0): LayerNorm() + (1): LayerNorm() + (2): LayerNorm() + (3): LayerNorm() + ) + (last_ln): LayerNorm() + ) + ) + (sin_pos): SinusoidalPosEmb() + (enc_pos_proj): Linear(in_features=384, out_features=192, bias=True) + (dec_query_proj): Linear(in_features=384, out_features=192, bias=True) + (dec_res_proj): Linear(in_features=384, out_features=192, bias=True) + (attn): MultiheadAttention( + (out_proj): Linear(in_features=192, out_features=192, 
bias=False) + ) + (text_encoder_postnet): ConvBlocks( + (res_blocks): Sequential( + (0): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + (1): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + (2): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + ) + (last_norm): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (post_net1): Conv1d(192, 192, kernel_size=(3,), stride=(1,), padding=(1,)) + ) + (fvae): FVAE( + (g_pre_net): Sequential( + (0): Conv1d(192, 192, kernel_size=(8,), stride=(4,), padding=(2,)) + ) + (encoder): FVAEEncoder( + (pre_net): Sequential( + (0): Conv1d(80, 192, kernel_size=(8,), stride=(4,), padding=(2,)) + ) + (nn): ConditionalConvBlocks( + (res_blocks): Sequential( + (0): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + (1): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + (2): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, 
kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + (3): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + (4): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + (5): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + (6): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + (7): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + ) + (last_norm): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (post_net1): Conv1d(192, 192, kernel_size=(3,), stride=(1,), padding=(1,)) + (g_prenet): Conv1d(192, 192, kernel_size=(3,), stride=(1,), padding=(1,)) + ) + (out_proj): Conv1d(192, 32, 
kernel_size=(1,), stride=(1,)) + ) + (prior_flow): ResFlow( + (flows): ModuleList( + (0): CouplingLayer( + (pre): Conv1d(8, 64, kernel_size=(1,), stride=(1,)) + (enc): WN( + (in_layers): ModuleList( + (0): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + (1): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + (2): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + (3): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + ) + (res_skip_layers): ModuleList( + (0): Conv1d(64, 128, kernel_size=(1,), stride=(1,)) + (1): Conv1d(64, 128, kernel_size=(1,), stride=(1,)) + (2): Conv1d(64, 128, kernel_size=(1,), stride=(1,)) + (3): Conv1d(64, 64, kernel_size=(1,), stride=(1,)) + ) + (drop): Dropout(p=0, inplace=False) + (cond_layer): Conv1d(192, 512, kernel_size=(1,), stride=(1,)) + ) + (post): Conv1d(64, 8, kernel_size=(1,), stride=(1,)) + ) + (1): FlipLayer() + (2): CouplingLayer( + (pre): Conv1d(8, 64, kernel_size=(1,), stride=(1,)) + (enc): WN( + (in_layers): ModuleList( + (0): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + (1): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + (2): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + (3): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + ) + (res_skip_layers): ModuleList( + (0): Conv1d(64, 128, kernel_size=(1,), stride=(1,)) + (1): Conv1d(64, 128, kernel_size=(1,), stride=(1,)) + (2): Conv1d(64, 128, kernel_size=(1,), stride=(1,)) + (3): Conv1d(64, 64, kernel_size=(1,), stride=(1,)) + ) + (drop): Dropout(p=0, inplace=False) + (cond_layer): Conv1d(192, 512, kernel_size=(1,), stride=(1,)) + ) + (post): Conv1d(64, 8, kernel_size=(1,), stride=(1,)) + ) + (3): FlipLayer() + (4): CouplingLayer( + (pre): Conv1d(8, 64, kernel_size=(1,), stride=(1,)) + (enc): WN( + (in_layers): ModuleList( + (0): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + (1): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + (2): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + (3): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + ) + (res_skip_layers): ModuleList( + (0): Conv1d(64, 128, kernel_size=(1,), stride=(1,)) + (1): Conv1d(64, 128, kernel_size=(1,), stride=(1,)) + (2): Conv1d(64, 128, kernel_size=(1,), stride=(1,)) + (3): Conv1d(64, 64, kernel_size=(1,), stride=(1,)) + ) + (drop): Dropout(p=0, inplace=False) + (cond_layer): Conv1d(192, 512, kernel_size=(1,), stride=(1,)) + ) + (post): Conv1d(64, 8, kernel_size=(1,), stride=(1,)) + ) + (5): FlipLayer() + (6): CouplingLayer( + (pre): Conv1d(8, 64, kernel_size=(1,), stride=(1,)) + (enc): WN( + (in_layers): ModuleList( + (0): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + (1): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + (2): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + (3): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + ) + (res_skip_layers): ModuleList( + (0): Conv1d(64, 128, kernel_size=(1,), stride=(1,)) + (1): Conv1d(64, 128, kernel_size=(1,), stride=(1,)) + (2): Conv1d(64, 128, kernel_size=(1,), stride=(1,)) + (3): Conv1d(64, 64, kernel_size=(1,), stride=(1,)) + ) + (drop): Dropout(p=0, inplace=False) + (cond_layer): Conv1d(192, 512, kernel_size=(1,), stride=(1,)) + ) + (post): Conv1d(64, 8, kernel_size=(1,), stride=(1,)) + ) + (7): FlipLayer() + ) + ) + (decoder): FVAEDecoder( + (pre_net): Sequential( + (0): ConvTranspose1d(16, 192, kernel_size=(4,), stride=(4,)) + ) + (nn): ConditionalConvBlocks( + 
(res_blocks): Sequential( + (0): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + (1): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + (2): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + (3): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + ) + (last_norm): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (post_net1): Conv1d(192, 192, kernel_size=(3,), stride=(1,), padding=(1,)) + (g_prenet): Conv1d(192, 192, kernel_size=(3,), stride=(1,), padding=(1,)) + ) + (out_proj): Conv1d(192, 80, kernel_size=(1,), stride=(1,)) + ) + ) + (word_pos_proj): Linear(in_features=192, out_features=192, bias=True) +) +| model Trainable Parameters: 23.020M +| dur_predictor Trainable Parameters: 0.555M +| length_regulator Trainable Parameters: 0.000M +| ph_encoder Trainable Parameters: 4.753M +| ph2word_encoder Trainable Parameters: 2.081M +| sin_pos Trainable Parameters: 0.000M +| enc_pos_proj Trainable Parameters: 0.074M +| dec_query_proj Trainable Parameters: 0.074M +| dec_res_proj Trainable Parameters: 0.074M +| attn Trainable Parameters: 0.147M +| text_encoder_postnet Trainable Parameters: 2.771M +| fvae Trainable Parameters: 12.453M +| word_pos_proj Trainable Parameters: 0.037M +| fvae.g_pre_net Trainable Parameters: 0.295M +| fvae.encoder Trainable Parameters: 7.444M +| fvae.prior_flow Trainable Parameters: 0.917M +| fvae.decoder Trainable Parameters: 3.796M +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. 
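Note: the "| <name> Trainable Parameters: X.XXXM" summary above is a per-submodule count of trainable parameters. A minimal sketch of how such a summary can be produced for any torch.nn.Module (the helper below is not the repository's code, just the standard way to obtain these numbers):

import torch.nn as nn

def count_trainable(module: nn.Module) -> float:
    """Trainable parameters in millions, matching the 'X.XXXM' format in the log above."""
    return sum(p.numel() for p in module.parameters() if p.requires_grad) / 1e6

def print_param_summary(model: nn.Module) -> None:
    print(f"| model Trainable Parameters: {count_trainable(model):.3f}M")
    for name, child in model.named_children():
        print(f"| {name} Trainable Parameters: {count_trainable(child):.3f}M")

# print_param_summary(model) would reproduce lines like "| fvae Trainable Parameters: 12.453M";
# nested entries such as "fvae.g_pre_net" would come from model.named_modules() instead.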
+| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. +| Validation results@10000: {'total_loss': 51.8263, 'kl_v': 0.0694, 'kl': 0.0694, 'l1': 0.1468, 'ssim': 0.1864, 'wdur': 0.1788, 'abs_word_dur_error': 48.102, 'abs_sent_dur_error': 0.4636, 'fr': 0.8025, 'pcr': 0.8348, 'dfr': 0.9724, 'pe_perceptual_layer_0': 0.1841, 'pe_perceptual_layer_1': 0.5094, 'pe_perceptual_layer_2': 0.629, 'pe_perceptual_layer_3': 0.1065, 'pe_perceptual_layer_4': 0.1281, 'pe_perceptual_layer_5': 0.755, 'pe_perceptual_layer_6': 0.8343, 'pe_perceptual_layer_7': 0.6819, 'pe_perceptual_layer_8': 0.4911, 'pe_perceptual_layer_9': 0.1061, 'f0_dtw': 30.6725} +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. +| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. +| Validation results@12000: {'total_loss': 53.5098, 'kl_v': 0.0745, 'kl': 0.0745, 'l1': 0.1508, 'ssim': 0.1898, 'wdur': 0.1823, 'abs_word_dur_error': 49.6133, 'abs_sent_dur_error': 0.5923, 'fr': 0.81, 'pcr': 0.8491, 'dfr': 0.9731, 'pe_perceptual_layer_0': 0.1921, 'pe_perceptual_layer_1': 0.5269, 'pe_perceptual_layer_2': 0.6512, 'pe_perceptual_layer_3': 0.1103, 'pe_perceptual_layer_4': 0.1327, 'pe_perceptual_layer_5': 0.767, 'pe_perceptual_layer_6': 0.845, 'pe_perceptual_layer_7': 0.6906, 'pe_perceptual_layer_8': 0.4972, 'pe_perceptual_layer_9': 0.1081} +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. +| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. +| Validation results@14000: {'total_loss': 54.312, 'kl_v': 0.0816, 'kl': 0.0816, 'l1': 0.1543, 'ssim': 0.1961, 'wdur': 0.1824, 'abs_word_dur_error': 50.3983, 'abs_sent_dur_error': 0.5875, 'fr': 0.8086, 'pcr': 0.8486, 'dfr': 0.9729, 'pe_perceptual_layer_0': 0.1969, 'pe_perceptual_layer_1': 0.5363, 'pe_perceptual_layer_2': 0.657, 'pe_perceptual_layer_3': 0.1113, 'pe_perceptual_layer_4': 0.134, 'pe_perceptual_layer_5': 0.777, 'pe_perceptual_layer_6': 0.8539, 'pe_perceptual_layer_7': 0.6981, 'pe_perceptual_layer_8': 0.5031, 'pe_perceptual_layer_9': 0.1097} +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. +| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. +| Validation results@16000: {'total_loss': 55.4649, 'kl_v': 0.0893, 'kl': 0.0893, 'l1': 0.1618, 'ssim': 0.2059, 'wdur': 0.1891, 'abs_word_dur_error': 51.504, 'abs_sent_dur_error': 0.5882, 'fr': 0.8132, 'pcr': 0.8511, 'dfr': 0.9731, 'pe_perceptual_layer_0': 0.2142, 'pe_perceptual_layer_1': 0.578, 'pe_perceptual_layer_2': 0.707, 'pe_perceptual_layer_3': 0.12, 'pe_perceptual_layer_4': 0.1447, 'pe_perceptual_layer_5': 0.8136, 'pe_perceptual_layer_6': 0.8941, 'pe_perceptual_layer_7': 0.7341, 'pe_perceptual_layer_8': 0.5293, 'pe_perceptual_layer_9': 0.1145} +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. +| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. 
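Note: the wdur and abs_word_dur_error terms in the validation dicts above relate to the duration model. For reference, the DurationPredictor printed at the top of each dump is a small stack of Conv1d -> ReLU -> LayerNorm -> Dropout blocks with a Linear + Softplus head, so predicted durations are non-negative. Below is a minimal functional sketch with the shapes taken from the printout; note that LayerNorm((192,)) normalizes the channel dimension, so the tensor is transposed around it here (the repository may handle this differently, e.g. with a channel-wise LayerNorm variant).

import torch
import torch.nn as nn

class DurationPredictorSketch(nn.Module):
    """Conv1d -> ReLU -> LayerNorm -> Dropout (x3) with a Linear + Softplus head, as in the printout."""
    def __init__(self, hidden=192, kernel=5, p_dropout=0.2, n_layers=3):
        super().__init__()
        self.convs = nn.ModuleList(
            [nn.Conv1d(hidden, hidden, kernel, padding=kernel // 2) for _ in range(n_layers)])
        self.norms = nn.ModuleList([nn.LayerNorm(hidden) for _ in range(n_layers)])
        self.drop = nn.Dropout(p_dropout)
        self.out = nn.Sequential(nn.Linear(hidden, 1), nn.Softplus())  # Softplus defaults: beta=1, threshold=20

    def forward(self, x):                      # x: [B, T, hidden] token features
        h = x.transpose(1, 2)                  # Conv1d expects [B, C, T]
        for conv, norm in zip(self.convs, self.norms):
            h = torch.relu(conv(h))
            h = norm(h.transpose(1, 2)).transpose(1, 2)   # LayerNorm over the channel dim
            h = self.drop(h)
        return self.out(h.transpose(1, 2)).squeeze(-1)    # [B, T] non-negative durations

print(DurationPredictorSketch()(torch.randn(2, 37, 192)).shape)   # torch.Size([2, 37])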
+| Validation results@18000: {'total_loss': 53.6004, 'kl_v': 0.0933, 'kl': 0.0933, 'l1': 0.1674, 'ssim': 0.2133, 'wdur': 0.1827, 'abs_word_dur_error': 49.7385, 'abs_sent_dur_error': 0.4656, 'fr': 0.8183, 'pcr': 0.8552, 'dfr': 0.9728, 'pe_perceptual_layer_0': 0.2271, 'pe_perceptual_layer_1': 0.6122, 'pe_perceptual_layer_2': 0.7469, 'pe_perceptual_layer_3': 0.1274, 'pe_perceptual_layer_4': 0.1572, 'pe_perceptual_layer_5': 0.8537, 'pe_perceptual_layer_6': 0.932, 'pe_perceptual_layer_7': 0.7661, 'pe_perceptual_layer_8': 0.5547, 'pe_perceptual_layer_9': 0.1191} +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. +| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. +| Validation results@20000: {'total_loss': 53.1732, 'kl_v': 0.0995, 'kl': 0.0995, 'l1': 0.1683, 'ssim': 0.2151, 'wdur': 0.181, 'abs_word_dur_error': 49.2487, 'abs_sent_dur_error': 0.517, 'fr': 0.8178, 'pcr': 0.8535, 'dfr': 0.9727, 'pe_perceptual_layer_0': 0.2375, 'pe_perceptual_layer_1': 0.645, 'pe_perceptual_layer_2': 0.7812, 'pe_perceptual_layer_3': 0.1314, 'pe_perceptual_layer_4': 0.1646, 'pe_perceptual_layer_5': 0.8831, 'pe_perceptual_layer_6': 0.9679, 'pe_perceptual_layer_7': 0.8076, 'pe_perceptual_layer_8': 0.5786, 'pe_perceptual_layer_9': 0.1198, 'f0_dtw': 28.971} +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. +| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. +| Validation results@22000: {'total_loss': 54.2263, 'kl_v': 0.109, 'kl': 0.109, 'l1': 0.1715, 'ssim': 0.2249, 'wdur': 0.1857, 'abs_word_dur_error': 50.2638, 'abs_sent_dur_error': 0.5163, 'fr': 0.8181, 'pcr': 0.8554, 'dfr': 0.9726, 'pe_perceptual_layer_0': 0.2599, 'pe_perceptual_layer_1': 0.7, 'pe_perceptual_layer_2': 0.8383, 'pe_perceptual_layer_3': 0.1421, 'pe_perceptual_layer_4': 0.1772, 'pe_perceptual_layer_5': 0.9223, 'pe_perceptual_layer_6': 1.0169, 'pe_perceptual_layer_7': 0.8602, 'pe_perceptual_layer_8': 0.6082, 'pe_perceptual_layer_9': 0.121} +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. +| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. +| Validation results@24000: {'total_loss': 55.3468, 'kl_v': 0.111, 'kl': 0.111, 'l1': 0.1767, 'ssim': 0.2307, 'wdur': 0.1869, 'abs_word_dur_error': 51.3453, 'abs_sent_dur_error': 0.5444, 'fr': 0.8156, 'pcr': 0.8526, 'dfr': 0.9725, 'pe_perceptual_layer_0': 0.2682, 'pe_perceptual_layer_1': 0.7222, 'pe_perceptual_layer_2': 0.8621, 'pe_perceptual_layer_3': 0.1456, 'pe_perceptual_layer_4': 0.1827, 'pe_perceptual_layer_5': 0.9396, 'pe_perceptual_layer_6': 1.0401, 'pe_perceptual_layer_7': 0.8863, 'pe_perceptual_layer_8': 0.6256, 'pe_perceptual_layer_9': 0.124} +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. +| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. +| Validation results@26000: {'total_loss': 57.2875, 'kl_v': 0.118, 'kl': 0.118, 'l1': 0.179, 'ssim': 0.2338, 'wdur': 0.1912, 'abs_word_dur_error': 53.1697, 'abs_sent_dur_error': 0.6388, 'fr': 0.8149, 'pcr': 0.8515, 'dfr': 0.9723, 'pe_perceptual_layer_0': 0.2806, 'pe_perceptual_layer_1': 0.7548, 'pe_perceptual_layer_2': 0.8926, 'pe_perceptual_layer_3': 0.15, 'pe_perceptual_layer_4': 0.1894, 'pe_perceptual_layer_5': 0.9599, 'pe_perceptual_layer_6': 1.0611, 'pe_perceptual_layer_7': 0.9085, 'pe_perceptual_layer_8': 0.6403, 'pe_perceptual_layer_9': 0.1249} +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. 
+| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. +| Validation results@28000: {'total_loss': 53.7622, 'kl_v': 0.1137, 'kl': 0.1137, 'l1': 0.1823, 'ssim': 0.2406, 'wdur': 0.1833, 'abs_word_dur_error': 49.793, 'abs_sent_dur_error': 0.4878, 'fr': 0.8188, 'pcr': 0.8564, 'dfr': 0.9727, 'pe_perceptual_layer_0': 0.2969, 'pe_perceptual_layer_1': 0.7963, 'pe_perceptual_layer_2': 0.933, 'pe_perceptual_layer_3': 0.1564, 'pe_perceptual_layer_4': 0.1992, 'pe_perceptual_layer_5': 0.9899, 'pe_perceptual_layer_6': 1.0978, 'pe_perceptual_layer_7': 0.9498, 'pe_perceptual_layer_8': 0.6627, 'pe_perceptual_layer_9': 0.1256} +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. +| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. +| Validation results@30000: {'total_loss': 54.1469, 'kl_v': 0.1197, 'kl': 0.1197, 'l1': 0.1823, 'ssim': 0.239, 'wdur': 0.1829, 'abs_word_dur_error': 50.2123, 'abs_sent_dur_error': 0.4498, 'fr': 0.8152, 'pcr': 0.8535, 'dfr': 0.9725, 'pe_perceptual_layer_0': 0.3014, 'pe_perceptual_layer_1': 0.8102, 'pe_perceptual_layer_2': 0.9493, 'pe_perceptual_layer_3': 0.159, 'pe_perceptual_layer_4': 0.2041, 'pe_perceptual_layer_5': 1.0036, 'pe_perceptual_layer_6': 1.1144, 'pe_perceptual_layer_7': 0.9679, 'pe_perceptual_layer_8': 0.6753, 'pe_perceptual_layer_9': 0.1265, 'f0_dtw': 28.6883} +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. +| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. +| Validation results@32000: {'total_loss': 55.0054, 'kl_v': 0.1204, 'kl': 0.1204, 'l1': 0.1859, 'ssim': 0.2478, 'wdur': 0.1847, 'abs_word_dur_error': 50.9757, 'abs_sent_dur_error': 0.5245, 'fr': 0.8181, 'pcr': 0.8552, 'dfr': 0.9726, 'pe_perceptual_layer_0': 0.3144, 'pe_perceptual_layer_1': 0.8395, 'pe_perceptual_layer_2': 0.9773, 'pe_perceptual_layer_3': 0.1644, 'pe_perceptual_layer_4': 0.2103, 'pe_perceptual_layer_5': 1.0247, 'pe_perceptual_layer_6': 1.1347, 'pe_perceptual_layer_7': 0.9875, 'pe_perceptual_layer_8': 0.6885, 'pe_perceptual_layer_9': 0.1284} +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. +| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. +| Validation results@34000: {'total_loss': 54.3327, 'kl_v': 0.1173, 'kl': 0.1173, 'l1': 0.1875, 'ssim': 0.2496, 'wdur': 0.182, 'abs_word_dur_error': 50.3724, 'abs_sent_dur_error': 0.4575, 'fr': 0.8195, 'pcr': 0.8571, 'dfr': 0.9726, 'pe_perceptual_layer_0': 0.3217, 'pe_perceptual_layer_1': 0.8593, 'pe_perceptual_layer_2': 0.9981, 'pe_perceptual_layer_3': 0.1672, 'pe_perceptual_layer_4': 0.2146, 'pe_perceptual_layer_5': 1.0359, 'pe_perceptual_layer_6': 1.1503, 'pe_perceptual_layer_7': 1.0042, 'pe_perceptual_layer_8': 0.7021, 'pe_perceptual_layer_9': 0.1307} +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. +| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. 
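Note: the kl / kl_v terms above appear to be the KL regularizer on the FVAE latent. From the printed shapes, the encoder's pre_net Conv1d(80, 192, kernel_size=(8,), stride=(4,), padding=(2,)) downsamples the 80-bin mel by 4x, out_proj Conv1d(192, 32) yields a 16-channel mean and log-variance, and the decoder's ConvTranspose1d(16, 192, kernel_size=(4,), stride=(4,)) upsamples the sampled latent back before the final projection to 80 mel bins. A quick shape check with stand-alone layers (a sketch, not the repository's modules; the conditioning ConvBlocks are omitted):

import torch
import torch.nn as nn

B, T = 2, 400                                        # batch size, mel frames
mel = torch.randn(B, 80, T)

pre_net = nn.Conv1d(80, 192, kernel_size=8, stride=4, padding=2)   # 4x downsampling, as printed
out_proj = nn.Conv1d(192, 32, kernel_size=1)                       # -> 16-ch mean + 16-ch log-variance
mean, logvar = out_proj(pre_net(mel)).chunk(2, dim=1)
z = mean + torch.randn_like(mean) * (0.5 * logvar).exp()           # reparameterised latent sample

up = nn.ConvTranspose1d(16, 192, kernel_size=4, stride=4)          # decoder pre_net: 4x upsampling
to_mel = nn.Conv1d(192, 80, kernel_size=1)                         # decoder out_proj
print(z.shape, to_mel(up(z)).shape)                                # [2, 16, 100] and [2, 80, 400]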
+| Validation results@36000: {'total_loss': 55.3515, 'kl_v': 0.1228, 'kl': 0.1228, 'l1': 0.1888, 'ssim': 0.2538, 'wdur': 0.1875, 'abs_word_dur_error': 51.3296, 'abs_sent_dur_error': 0.501, 'fr': 0.8177, 'pcr': 0.8552, 'dfr': 0.9722, 'pe_perceptual_layer_0': 0.3238, 'pe_perceptual_layer_1': 0.8597, 'pe_perceptual_layer_2': 0.9955, 'pe_perceptual_layer_3': 0.1672, 'pe_perceptual_layer_4': 0.2141, 'pe_perceptual_layer_5': 1.0384, 'pe_perceptual_layer_6': 1.151, 'pe_perceptual_layer_7': 1.002, 'pe_perceptual_layer_8': 0.6983, 'pe_perceptual_layer_9': 0.1295} +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. +| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. +| Validation results@38000: {'total_loss': 54.6426, 'kl_v': 0.1224, 'kl': 0.1224, 'l1': 0.191, 'ssim': 0.2556, 'wdur': 0.1842, 'abs_word_dur_error': 50.6374, 'abs_sent_dur_error': 0.4856, 'fr': 0.8167, 'pcr': 0.8548, 'dfr': 0.9726, 'pe_perceptual_layer_0': 0.3313, 'pe_perceptual_layer_1': 0.8839, 'pe_perceptual_layer_2': 1.0205, 'pe_perceptual_layer_3': 0.1706, 'pe_perceptual_layer_4': 0.2198, 'pe_perceptual_layer_5': 1.0535, 'pe_perceptual_layer_6': 1.1696, 'pe_perceptual_layer_7': 1.0239, 'pe_perceptual_layer_8': 0.7111, 'pe_perceptual_layer_9': 0.1302} +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. +| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. +| Validation results@40000: {'total_loss': 55.0365, 'kl_v': 0.1184, 'kl': 0.1184, 'l1': 0.1911, 'ssim': 0.2572, 'wdur': 0.1867, 'abs_word_dur_error': 50.9974, 'abs_sent_dur_error': 0.5205, 'fr': 0.8174, 'pcr': 0.857, 'dfr': 0.9725, 'pe_perceptual_layer_0': 0.3345, 'pe_perceptual_layer_1': 0.8903, 'pe_perceptual_layer_2': 1.0342, 'pe_perceptual_layer_3': 0.1735, 'pe_perceptual_layer_4': 0.2225, 'pe_perceptual_layer_5': 1.0598, 'pe_perceptual_layer_6': 1.1767, 'pe_perceptual_layer_7': 1.0317, 'pe_perceptual_layer_8': 0.7156, 'pe_perceptual_layer_9': 0.1322, 'f0_dtw': 28.4247} +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. +| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. +| Validation results@42000: {'total_loss': 54.9636, 'kl_v': 0.1212, 'kl': 0.1212, 'l1': 0.1909, 'ssim': 0.253, 'wdur': 0.1854, 'abs_word_dur_error': 50.9491, 'abs_sent_dur_error': 0.4912, 'fr': 0.8202, 'pcr': 0.8589, 'dfr': 0.9726, 'pe_perceptual_layer_0': 0.3288, 'pe_perceptual_layer_1': 0.8773, 'pe_perceptual_layer_2': 1.0153, 'pe_perceptual_layer_3': 0.1697, 'pe_perceptual_layer_4': 0.2193, 'pe_perceptual_layer_5': 1.0572, 'pe_perceptual_layer_6': 1.1695, 'pe_perceptual_layer_7': 1.0193, 'pe_perceptual_layer_8': 0.7154, 'pe_perceptual_layer_9': 0.1321} +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. +| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. 
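Note: the repeated "| Validation results@<step>: {...}" lines in these logs are Python dict literals, so they can be scraped into a step-indexed series for plotting or for comparing runs. A minimal sketch (the example path is one of the log files added in this diff; adjust as needed):

import ast
import re
from pathlib import Path

VAL_LINE = re.compile(r"\| Validation results@(\d+): (\{.*\})")

def parse_validation_log(path):
    """Return {step: metrics} scraped from a terminal log like the ones in this directory."""
    results = {}
    for line in Path(path).read_text().splitlines():
        m = VAL_LINE.search(line)
        if m:
            results[int(m.group(1))] = ast.literal_eval(m.group(2))
    return results

# example usage:
# metrics = parse_validation_log(
#     "text_to_speech/checkpoints/ljspeech/ps_adv_baseline/terminal_logs/log_20221207090222.txt")
# steps = sorted(metrics); l1_curve = [metrics[s]["l1"] for s in steps]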
+| Validation results@44000: {'total_loss': 55.1718, 'kl_v': 0.1175, 'kl': 0.1175, 'l1': 0.1962, 'ssim': 0.2637, 'wdur': 0.1875, 'abs_word_dur_error': 51.1125, 'abs_sent_dur_error': 0.5292, 'fr': 0.8185, 'pcr': 0.8568, 'dfr': 0.9724, 'pe_perceptual_layer_0': 0.3489, 'pe_perceptual_layer_1': 0.9226, 'pe_perceptual_layer_2': 1.0591, 'pe_perceptual_layer_3': 0.1773, 'pe_perceptual_layer_4': 0.2288, 'pe_perceptual_layer_5': 1.0839, 'pe_perceptual_layer_6': 1.2039, 'pe_perceptual_layer_7': 1.0597, 'pe_perceptual_layer_8': 0.7356, 'pe_perceptual_layer_9': 0.1327} diff --git a/text_to_speech/checkpoints/ljspeech/ps_adv_baseline/terminal_logs/log_20221207151643.txt b/text_to_speech/checkpoints/ljspeech/ps_adv_baseline/terminal_logs/log_20221207151643.txt new file mode 100644 index 0000000000000000000000000000000000000000..f818a2e0dcfc151eaee26e0c2e7c792528b55f35 --- /dev/null +++ b/text_to_speech/checkpoints/ljspeech/ps_adv_baseline/terminal_logs/log_20221207151643.txt @@ -0,0 +1,801 @@ +| Copied codes to checkpoints/1206_lj/ps_adv_baseline/codes/20221207151643. +| model Arch: PortaSpeech( + (dur_predictor): DurationPredictor( + (conv): ModuleList( + (0): Sequential( + (0): Conv1d(192, 192, kernel_size=(5,), stride=(1,), padding=(2,)) + (1): ReLU() + (2): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (3): Dropout(p=0.2, inplace=False) + ) + (1): Sequential( + (0): Conv1d(192, 192, kernel_size=(5,), stride=(1,), padding=(2,)) + (1): ReLU() + (2): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (3): Dropout(p=0.2, inplace=False) + ) + (2): Sequential( + (0): Conv1d(192, 192, kernel_size=(5,), stride=(1,), padding=(2,)) + (1): ReLU() + (2): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (3): Dropout(p=0.2, inplace=False) + ) + ) + (linear): Sequential( + (0): Linear(in_features=192, out_features=1, bias=True) + (1): Softplus(beta=1, threshold=20) + ) + ) + (length_regulator): LengthRegulator() + (ph_encoder): RelTransformerEncoder( + (emb): Embedding(79, 192, padding_idx=0) + (pre): ConvReluNorm( + (conv_layers): ModuleList( + (0): Conv1d(192, 192, kernel_size=(5,), stride=(1,), padding=(2,)) + (1): Conv1d(192, 192, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): Conv1d(192, 192, kernel_size=(5,), stride=(1,), padding=(2,)) + ) + (norm_layers): ModuleList( + (0): LayerNorm() + (1): LayerNorm() + (2): LayerNorm() + ) + (relu_drop): Sequential( + (0): ReLU() + (1): Dropout(p=0, inplace=False) + ) + (proj): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + ) + (encoder): Encoder( + (drop): Dropout(p=0.0, inplace=False) + (attn_layers): ModuleList( + (0): MultiHeadAttention( + (conv_q): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_k): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_v): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_o): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + (1): MultiHeadAttention( + (conv_q): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_k): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_v): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_o): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + (2): MultiHeadAttention( + (conv_q): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_k): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_v): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_o): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + (3): 
MultiHeadAttention( + (conv_q): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_k): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_v): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_o): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + ) + (norm_layers_1): ModuleList( + (0): LayerNorm() + (1): LayerNorm() + (2): LayerNorm() + (3): LayerNorm() + ) + (ffn_layers): ModuleList( + (0): FFN( + (conv_1): Conv1d(192, 768, kernel_size=(5,), stride=(1,), padding=(2,)) + (conv_2): Conv1d(768, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + (1): FFN( + (conv_1): Conv1d(192, 768, kernel_size=(5,), stride=(1,), padding=(2,)) + (conv_2): Conv1d(768, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + (2): FFN( + (conv_1): Conv1d(192, 768, kernel_size=(5,), stride=(1,), padding=(2,)) + (conv_2): Conv1d(768, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + (3): FFN( + (conv_1): Conv1d(192, 768, kernel_size=(5,), stride=(1,), padding=(2,)) + (conv_2): Conv1d(768, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + ) + (norm_layers_2): ModuleList( + (0): LayerNorm() + (1): LayerNorm() + (2): LayerNorm() + (3): LayerNorm() + ) + (last_ln): LayerNorm() + ) + ) + (ph2word_encoder): RelTransformerEncoder( + (pre): ConvReluNorm( + (conv_layers): ModuleList( + (0): Conv1d(192, 192, kernel_size=(5,), stride=(1,), padding=(2,)) + (1): Conv1d(192, 192, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): Conv1d(192, 192, kernel_size=(5,), stride=(1,), padding=(2,)) + ) + (norm_layers): ModuleList( + (0): LayerNorm() + (1): LayerNorm() + (2): LayerNorm() + ) + (relu_drop): Sequential( + (0): ReLU() + (1): Dropout(p=0, inplace=False) + ) + (proj): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + ) + (encoder): Encoder( + (drop): Dropout(p=0.0, inplace=False) + (attn_layers): ModuleList( + (0): MultiHeadAttention( + (conv_q): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_k): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_v): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_o): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + (1): MultiHeadAttention( + (conv_q): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_k): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_v): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_o): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + (2): MultiHeadAttention( + (conv_q): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_k): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_v): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_o): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + (3): MultiHeadAttention( + (conv_q): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_k): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_v): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_o): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + ) + (norm_layers_1): ModuleList( + (0): LayerNorm() + (1): LayerNorm() + (2): LayerNorm() + (3): LayerNorm() + ) + (ffn_layers): ModuleList( + (0): FFN( + (conv_1): Conv1d(192, 192, kernel_size=(5,), stride=(1,), padding=(2,)) + (conv_2): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, 
inplace=False) + ) + (1): FFN( + (conv_1): Conv1d(192, 192, kernel_size=(5,), stride=(1,), padding=(2,)) + (conv_2): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + (2): FFN( + (conv_1): Conv1d(192, 192, kernel_size=(5,), stride=(1,), padding=(2,)) + (conv_2): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + (3): FFN( + (conv_1): Conv1d(192, 192, kernel_size=(5,), stride=(1,), padding=(2,)) + (conv_2): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + ) + (norm_layers_2): ModuleList( + (0): LayerNorm() + (1): LayerNorm() + (2): LayerNorm() + (3): LayerNorm() + ) + (last_ln): LayerNorm() + ) + ) + (sin_pos): SinusoidalPosEmb() + (enc_pos_proj): Linear(in_features=384, out_features=192, bias=True) + (dec_query_proj): Linear(in_features=384, out_features=192, bias=True) + (dec_res_proj): Linear(in_features=384, out_features=192, bias=True) + (attn): MultiheadAttention( + (out_proj): Linear(in_features=192, out_features=192, bias=False) + ) + (text_encoder_postnet): ConvBlocks( + (res_blocks): Sequential( + (0): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + (1): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + (2): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + ) + (last_norm): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (post_net1): Conv1d(192, 192, kernel_size=(3,), stride=(1,), padding=(1,)) + ) + (fvae): FVAE( + (g_pre_net): Sequential( + (0): Conv1d(192, 192, kernel_size=(8,), stride=(4,), padding=(2,)) + ) + (encoder): FVAEEncoder( + (pre_net): Sequential( + (0): Conv1d(80, 192, kernel_size=(8,), stride=(4,), padding=(2,)) + ) + (nn): ConditionalConvBlocks( + (res_blocks): Sequential( + (0): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): 
GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + (1): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + (2): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + (3): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + (4): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + (5): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + (6): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): 
LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + (7): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + ) + (last_norm): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (post_net1): Conv1d(192, 192, kernel_size=(3,), stride=(1,), padding=(1,)) + (g_prenet): Conv1d(192, 192, kernel_size=(3,), stride=(1,), padding=(1,)) + ) + (out_proj): Conv1d(192, 32, kernel_size=(1,), stride=(1,)) + ) + (prior_flow): ResFlow( + (flows): ModuleList( + (0): CouplingLayer( + (pre): Conv1d(8, 64, kernel_size=(1,), stride=(1,)) + (enc): WN( + (in_layers): ModuleList( + (0): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + (1): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + (2): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + (3): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + ) + (res_skip_layers): ModuleList( + (0): Conv1d(64, 128, kernel_size=(1,), stride=(1,)) + (1): Conv1d(64, 128, kernel_size=(1,), stride=(1,)) + (2): Conv1d(64, 128, kernel_size=(1,), stride=(1,)) + (3): Conv1d(64, 64, kernel_size=(1,), stride=(1,)) + ) + (drop): Dropout(p=0, inplace=False) + (cond_layer): Conv1d(192, 512, kernel_size=(1,), stride=(1,)) + ) + (post): Conv1d(64, 8, kernel_size=(1,), stride=(1,)) + ) + (1): FlipLayer() + (2): CouplingLayer( + (pre): Conv1d(8, 64, kernel_size=(1,), stride=(1,)) + (enc): WN( + (in_layers): ModuleList( + (0): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + (1): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + (2): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + (3): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + ) + (res_skip_layers): ModuleList( + (0): Conv1d(64, 128, kernel_size=(1,), stride=(1,)) + (1): Conv1d(64, 128, kernel_size=(1,), stride=(1,)) + (2): Conv1d(64, 128, kernel_size=(1,), stride=(1,)) + (3): Conv1d(64, 64, kernel_size=(1,), stride=(1,)) + ) + (drop): Dropout(p=0, inplace=False) + (cond_layer): Conv1d(192, 512, kernel_size=(1,), stride=(1,)) + ) + (post): Conv1d(64, 8, kernel_size=(1,), stride=(1,)) + ) + (3): FlipLayer() + (4): CouplingLayer( + (pre): Conv1d(8, 64, kernel_size=(1,), stride=(1,)) + (enc): WN( + (in_layers): ModuleList( + (0): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + (1): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + (2): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + (3): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + ) + (res_skip_layers): ModuleList( + (0): Conv1d(64, 128, kernel_size=(1,), stride=(1,)) + (1): Conv1d(64, 128, kernel_size=(1,), stride=(1,)) + (2): Conv1d(64, 128, kernel_size=(1,), stride=(1,)) + (3): Conv1d(64, 64, kernel_size=(1,), stride=(1,)) + ) + (drop): Dropout(p=0, inplace=False) + (cond_layer): Conv1d(192, 512, kernel_size=(1,), 
stride=(1,)) + ) + (post): Conv1d(64, 8, kernel_size=(1,), stride=(1,)) + ) + (5): FlipLayer() + (6): CouplingLayer( + (pre): Conv1d(8, 64, kernel_size=(1,), stride=(1,)) + (enc): WN( + (in_layers): ModuleList( + (0): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + (1): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + (2): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + (3): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + ) + (res_skip_layers): ModuleList( + (0): Conv1d(64, 128, kernel_size=(1,), stride=(1,)) + (1): Conv1d(64, 128, kernel_size=(1,), stride=(1,)) + (2): Conv1d(64, 128, kernel_size=(1,), stride=(1,)) + (3): Conv1d(64, 64, kernel_size=(1,), stride=(1,)) + ) + (drop): Dropout(p=0, inplace=False) + (cond_layer): Conv1d(192, 512, kernel_size=(1,), stride=(1,)) + ) + (post): Conv1d(64, 8, kernel_size=(1,), stride=(1,)) + ) + (7): FlipLayer() + ) + ) + (decoder): FVAEDecoder( + (pre_net): Sequential( + (0): ConvTranspose1d(16, 192, kernel_size=(4,), stride=(4,)) + ) + (nn): ConditionalConvBlocks( + (res_blocks): Sequential( + (0): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + (1): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + (2): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + (3): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU(approximate=none) + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + ) + (last_norm): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (post_net1): Conv1d(192, 192, kernel_size=(3,), stride=(1,), padding=(1,)) + 
(g_prenet): Conv1d(192, 192, kernel_size=(3,), stride=(1,), padding=(1,)) + ) + (out_proj): Conv1d(192, 80, kernel_size=(1,), stride=(1,)) + ) + ) + (word_pos_proj): Linear(in_features=192, out_features=192, bias=True) +) +| model Trainable Parameters: 23.020M +| dur_predictor Trainable Parameters: 0.555M +| length_regulator Trainable Parameters: 0.000M +| ph_encoder Trainable Parameters: 4.753M +| ph2word_encoder Trainable Parameters: 2.081M +| sin_pos Trainable Parameters: 0.000M +| enc_pos_proj Trainable Parameters: 0.074M +| dec_query_proj Trainable Parameters: 0.074M +| dec_res_proj Trainable Parameters: 0.074M +| attn Trainable Parameters: 0.147M +| text_encoder_postnet Trainable Parameters: 2.771M +| fvae Trainable Parameters: 12.453M +| word_pos_proj Trainable Parameters: 0.037M +| fvae.g_pre_net Trainable Parameters: 0.295M +| fvae.encoder Trainable Parameters: 7.444M +| fvae.prior_flow Trainable Parameters: 0.917M +| fvae.decoder Trainable Parameters: 3.796M +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. +| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. +| Validation results@46000: {'total_loss': 55.3226, 'kl_v': 0.1173, 'kl': 0.1173, 'l1': 0.1959, 'ssim': 0.2654, 'wdur': 0.1853, 'abs_word_dur_error': 51.2645, 'abs_sent_dur_error': 0.5245, 'fr': 0.8207, 'pcr': 0.8591, 'dfr': 0.9726, 'pe_perceptual_layer_0': 0.36, 'pe_perceptual_layer_1': 0.9525, 'pe_perceptual_layer_2': 1.0868, 'pe_perceptual_layer_3': 0.1808, 'pe_perceptual_layer_4': 0.2346, 'pe_perceptual_layer_5': 1.0964, 'pe_perceptual_layer_6': 1.2227, 'pe_perceptual_layer_7': 1.0857, 'pe_perceptual_layer_8': 0.7487, 'pe_perceptual_layer_9': 0.1327} +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. +| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. +| Validation results@48000: {'total_loss': 55.0375, 'kl_v': 0.1132, 'kl': 0.1132, 'l1': 0.1965, 'ssim': 0.265, 'wdur': 0.187, 'abs_word_dur_error': 50.9839, 'abs_sent_dur_error': 0.5326, 'fr': 0.8175, 'pcr': 0.8561, 'dfr': 0.9726, 'pe_perceptual_layer_0': 0.3585, 'pe_perceptual_layer_1': 0.9479, 'pe_perceptual_layer_2': 1.0824, 'pe_perceptual_layer_3': 0.1814, 'pe_perceptual_layer_4': 0.235, 'pe_perceptual_layer_5': 1.1008, 'pe_perceptual_layer_6': 1.2264, 'pe_perceptual_layer_7': 1.0889, 'pe_perceptual_layer_8': 0.7539, 'pe_perceptual_layer_9': 0.1331} +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. +| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. +| Validation results@50000: {'total_loss': 55.2445, 'kl_v': 0.1254, 'kl': 0.1254, 'l1': 0.195, 'ssim': 0.2642, 'wdur': 0.187, 'abs_word_dur_error': 51.1757, 'abs_sent_dur_error': 0.5226, 'fr': 0.8197, 'pcr': 0.857, 'dfr': 0.9725, 'pe_perceptual_layer_0': 0.3588, 'pe_perceptual_layer_1': 0.9461, 'pe_perceptual_layer_2': 1.0781, 'pe_perceptual_layer_3': 0.1798, 'pe_perceptual_layer_4': 0.233, 'pe_perceptual_layer_5': 1.0947, 'pe_perceptual_layer_6': 1.2205, 'pe_perceptual_layer_7': 1.0836, 'pe_perceptual_layer_8': 0.7468, 'pe_perceptual_layer_9': 0.1321, 'f0_dtw': 27.2883} +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. +| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. 
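The architecture dump above is easier to follow with a small reconstruction of one of its building blocks. The sketch below re-creates only the printed DurationPredictor stack (three Conv1d(192,192,k=5) -> ReLU -> LayerNorm -> Dropout(0.2) blocks, then Linear(192,1) + Softplus); it is illustrative, not the repository's actual module: the layer sizes come from the log, while the ChannelLastLayerNorm wrapper and the forward() shape handling are assumptions.

import torch
import torch.nn as nn

class ChannelLastLayerNorm(nn.LayerNorm):
    # Assumption: LayerNorm is applied over the channel dim of [B, C, T] tensors.
    def forward(self, x):
        return super().forward(x.transpose(1, 2)).transpose(1, 2)

class DurationPredictorSketch(nn.Module):
    # Mirrors the printed structure: 3 x (Conv1d 192->192 k=5 / ReLU / LayerNorm / Dropout 0.2),
    # then Linear(192 -> 1) + Softplus so predicted durations stay positive.
    def __init__(self, hidden=192, kernel=5, n_layers=3, p_dropout=0.2):
        super().__init__()
        self.conv = nn.ModuleList(
            nn.Sequential(
                nn.Conv1d(hidden, hidden, kernel, padding=kernel // 2),
                nn.ReLU(),
                ChannelLastLayerNorm(hidden),
                nn.Dropout(p_dropout),
            )
            for _ in range(n_layers)
        )
        self.linear = nn.Sequential(nn.Linear(hidden, 1), nn.Softplus())

    def forward(self, x):                      # x: [B, 192, T] phoneme encodings
        for block in self.conv:
            x = block(x)
        return self.linear(x.transpose(1, 2)).squeeze(-1)  # [B, T] per-token durations

if __name__ == "__main__":
    dur = DurationPredictorSketch()
    # ~554,881 trainable parameters, consistent with "dur_predictor Trainable Parameters: 0.555M" above.
    print(sum(p.numel() for p in dur.parameters() if p.requires_grad))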
+| Validation results@52000: {'total_loss': 56.9267, 'kl_v': 0.1176, 'kl': 0.1176, 'l1': 0.1959, 'ssim': 0.2685, 'wdur': 0.1906, 'abs_word_dur_error': 52.8086, 'abs_sent_dur_error': 0.5814, 'fr': 0.8171, 'pcr': 0.8568, 'dfr': 0.9725, 'pe_perceptual_layer_0': 0.3656, 'pe_perceptual_layer_1': 0.9636, 'pe_perceptual_layer_2': 1.0975, 'pe_perceptual_layer_3': 0.1834, 'pe_perceptual_layer_4': 0.2384, 'pe_perceptual_layer_5': 1.1114, 'pe_perceptual_layer_6': 1.2366, 'pe_perceptual_layer_7': 1.0981, 'pe_perceptual_layer_8': 0.7601, 'pe_perceptual_layer_9': 0.1356} +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. +| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. +| Validation results@54000: {'total_loss': 55.1061, 'kl_v': 0.1152, 'kl': 0.1152, 'l1': 0.2009, 'ssim': 0.2724, 'wdur': 0.1862, 'abs_word_dur_error': 51.0608, 'abs_sent_dur_error': 0.5063, 'fr': 0.8188, 'pcr': 0.8577, 'dfr': 0.9726, 'pe_perceptual_layer_0': 0.3752, 'pe_perceptual_layer_1': 0.9877, 'pe_perceptual_layer_2': 1.1161, 'pe_perceptual_layer_3': 0.1861, 'pe_perceptual_layer_4': 0.2431, 'pe_perceptual_layer_5': 1.1268, 'pe_perceptual_layer_6': 1.2506, 'pe_perceptual_layer_7': 1.1143, 'pe_perceptual_layer_8': 0.7707, 'pe_perceptual_layer_9': 0.1353} +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. +| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. +| Validation results@56000: {'total_loss': 55.0476, 'kl_v': 0.1089, 'kl': 0.1089, 'l1': 0.1995, 'ssim': 0.27, 'wdur': 0.1857, 'abs_word_dur_error': 51.0516, 'abs_sent_dur_error': 0.4765, 'fr': 0.8183, 'pcr': 0.8558, 'dfr': 0.9725, 'pe_perceptual_layer_0': 0.3728, 'pe_perceptual_layer_1': 0.9814, 'pe_perceptual_layer_2': 1.1146, 'pe_perceptual_layer_3': 0.1859, 'pe_perceptual_layer_4': 0.2427, 'pe_perceptual_layer_5': 1.1238, 'pe_perceptual_layer_6': 1.2538, 'pe_perceptual_layer_7': 1.1206, 'pe_perceptual_layer_8': 0.7716, 'pe_perceptual_layer_9': 0.1336} +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. +| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. +| Validation results@58000: {'total_loss': 55.3827, 'kl_v': 0.1112, 'kl': 0.1112, 'l1': 0.1994, 'ssim': 0.2715, 'wdur': 0.1857, 'abs_word_dur_error': 51.3235, 'abs_sent_dur_error': 0.5339, 'fr': 0.8176, 'pcr': 0.8561, 'dfr': 0.9725, 'pe_perceptual_layer_0': 0.3751, 'pe_perceptual_layer_1': 0.9873, 'pe_perceptual_layer_2': 1.1206, 'pe_perceptual_layer_3': 0.1867, 'pe_perceptual_layer_4': 0.2436, 'pe_perceptual_layer_5': 1.1251, 'pe_perceptual_layer_6': 1.2534, 'pe_perceptual_layer_7': 1.1198, 'pe_perceptual_layer_8': 0.7708, 'pe_perceptual_layer_9': 0.1336} +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. +| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. 
+| Validation results@60000: {'total_loss': 56.24, 'kl_v': 0.1043, 'kl': 0.1043, 'l1': 0.2008, 'ssim': 0.2747, 'wdur': 0.1874, 'abs_word_dur_error': 52.1623, 'abs_sent_dur_error': 0.5591, 'fr': 0.8181, 'pcr': 0.8567, 'dfr': 0.9724, 'pe_perceptual_layer_0': 0.3858, 'pe_perceptual_layer_1': 1.0108, 'pe_perceptual_layer_2': 1.141, 'pe_perceptual_layer_3': 0.1908, 'pe_perceptual_layer_4': 0.2492, 'pe_perceptual_layer_5': 1.1384, 'pe_perceptual_layer_6': 1.2719, 'pe_perceptual_layer_7': 1.1454, 'pe_perceptual_layer_8': 0.7867, 'pe_perceptual_layer_9': 0.1354, 'f0_dtw': 27.82} +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. +| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. +| Validation results@62000: {'total_loss': 56.1436, 'kl_v': 0.1071, 'kl': 0.1071, 'l1': 0.201, 'ssim': 0.2732, 'wdur': 0.1879, 'abs_word_dur_error': 52.0535, 'abs_sent_dur_error': 0.5683, 'fr': 0.8177, 'pcr': 0.8553, 'dfr': 0.9725, 'pe_perceptual_layer_0': 0.3831, 'pe_perceptual_layer_1': 1.0069, 'pe_perceptual_layer_2': 1.1375, 'pe_perceptual_layer_3': 0.1888, 'pe_perceptual_layer_4': 0.2476, 'pe_perceptual_layer_5': 1.1377, 'pe_perceptual_layer_6': 1.267, 'pe_perceptual_layer_7': 1.1356, 'pe_perceptual_layer_8': 0.7844, 'pe_perceptual_layer_9': 0.1359} +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. +| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. +| Validation results@64000: {'total_loss': 55.141, 'kl_v': 0.1121, 'kl': 0.1121, 'l1': 0.2013, 'ssim': 0.2751, 'wdur': 0.1854, 'abs_word_dur_error': 51.1136, 'abs_sent_dur_error': 0.4961, 'fr': 0.8174, 'pcr': 0.8554, 'dfr': 0.9725, 'pe_perceptual_layer_0': 0.3853, 'pe_perceptual_layer_1': 1.0118, 'pe_perceptual_layer_2': 1.1404, 'pe_perceptual_layer_3': 0.1899, 'pe_perceptual_layer_4': 0.2487, 'pe_perceptual_layer_5': 1.1417, 'pe_perceptual_layer_6': 1.2744, 'pe_perceptual_layer_7': 1.1455, 'pe_perceptual_layer_8': 0.785, 'pe_perceptual_layer_9': 0.1346} +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. +| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. +| Validation results@66000: {'total_loss': 54.8236, 'kl_v': 0.1019, 'kl': 0.1019, 'l1': 0.2009, 'ssim': 0.2739, 'wdur': 0.1848, 'abs_word_dur_error': 50.8206, 'abs_sent_dur_error': 0.4936, 'fr': 0.8178, 'pcr': 0.8559, 'dfr': 0.9724, 'pe_perceptual_layer_0': 0.3851, 'pe_perceptual_layer_1': 1.0105, 'pe_perceptual_layer_2': 1.14, 'pe_perceptual_layer_3': 0.1898, 'pe_perceptual_layer_4': 0.2486, 'pe_perceptual_layer_5': 1.1382, 'pe_perceptual_layer_6': 1.2713, 'pe_perceptual_layer_7': 1.1447, 'pe_perceptual_layer_8': 0.7872, 'pe_perceptual_layer_9': 0.1354} +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. +| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. 
+| Validation results@68000: {'total_loss': 56.4661, 'kl_v': 0.0998, 'kl': 0.0998, 'l1': 0.2027, 'ssim': 0.2764, 'wdur': 0.1886, 'abs_word_dur_error': 52.3817, 'abs_sent_dur_error': 0.5684, 'fr': 0.8185, 'pcr': 0.8578, 'dfr': 0.9723, 'pe_perceptual_layer_0': 0.3896, 'pe_perceptual_layer_1': 1.0211, 'pe_perceptual_layer_2': 1.1518, 'pe_perceptual_layer_3': 0.1925, 'pe_perceptual_layer_4': 0.2516, 'pe_perceptual_layer_5': 1.1489, 'pe_perceptual_layer_6': 1.282, 'pe_perceptual_layer_7': 1.1499, 'pe_perceptual_layer_8': 0.7939, 'pe_perceptual_layer_9': 0.1372} +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. +| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. +| Validation results@70000: {'total_loss': 56.0359, 'kl_v': 0.1054, 'kl': 0.1054, 'l1': 0.2038, 'ssim': 0.2775, 'wdur': 0.1878, 'abs_word_dur_error': 51.9967, 'abs_sent_dur_error': 0.5114, 'fr': 0.8179, 'pcr': 0.8574, 'dfr': 0.9724, 'pe_perceptual_layer_0': 0.3947, 'pe_perceptual_layer_1': 1.0341, 'pe_perceptual_layer_2': 1.1622, 'pe_perceptual_layer_3': 0.1938, 'pe_perceptual_layer_4': 0.254, 'pe_perceptual_layer_5': 1.1543, 'pe_perceptual_layer_6': 1.289, 'pe_perceptual_layer_7': 1.1601, 'pe_perceptual_layer_8': 0.7975, 'pe_perceptual_layer_9': 0.137, 'f0_dtw': 26.7273} +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. +| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. +| Validation results@72000: {'total_loss': 55.6662, 'kl_v': 0.1006, 'kl': 0.1006, 'l1': 0.2057, 'ssim': 0.2809, 'wdur': 0.1874, 'abs_word_dur_error': 51.6377, 'abs_sent_dur_error': 0.5076, 'fr': 0.8174, 'pcr': 0.8557, 'dfr': 0.9724, 'pe_perceptual_layer_0': 0.3986, 'pe_perceptual_layer_1': 1.0401, 'pe_perceptual_layer_2': 1.1671, 'pe_perceptual_layer_3': 0.195, 'pe_perceptual_layer_4': 0.2555, 'pe_perceptual_layer_5': 1.1579, 'pe_perceptual_layer_6': 1.2921, 'pe_perceptual_layer_7': 1.1642, 'pe_perceptual_layer_8': 0.8021, 'pe_perceptual_layer_9': 0.1371} +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. +| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. +| Validation results@74000: {'total_loss': 55.2744, 'kl_v': 0.101, 'kl': 0.101, 'l1': 0.2049, 'ssim': 0.2797, 'wdur': 0.1867, 'abs_word_dur_error': 51.2851, 'abs_sent_dur_error': 0.4706, 'fr': 0.8172, 'pcr': 0.8558, 'dfr': 0.9725, 'pe_perceptual_layer_0': 0.4, 'pe_perceptual_layer_1': 1.0461, 'pe_perceptual_layer_2': 1.1733, 'pe_perceptual_layer_3': 0.1957, 'pe_perceptual_layer_4': 0.2562, 'pe_perceptual_layer_5': 1.1598, 'pe_perceptual_layer_6': 1.3001, 'pe_perceptual_layer_7': 1.176, 'pe_perceptual_layer_8': 0.8041, 'pe_perceptual_layer_9': 0.1357} +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. +| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. 
+| Validation results@76000: {'total_loss': 55.8642, 'kl_v': 0.0932, 'kl': 0.0932, 'l1': 0.2046, 'ssim': 0.2809, 'wdur': 0.1863, 'abs_word_dur_error': 51.8476, 'abs_sent_dur_error': 0.5101, 'fr': 0.8185, 'pcr': 0.8574, 'dfr': 0.9724, 'pe_perceptual_layer_0': 0.401, 'pe_perceptual_layer_1': 1.0492, 'pe_perceptual_layer_2': 1.1768, 'pe_perceptual_layer_3': 0.1962, 'pe_perceptual_layer_4': 0.2577, 'pe_perceptual_layer_5': 1.1678, 'pe_perceptual_layer_6': 1.3047, 'pe_perceptual_layer_7': 1.1781, 'pe_perceptual_layer_8': 0.8088, 'pe_perceptual_layer_9': 0.1375} +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. +| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. +| Validation results@78000: {'total_loss': 56.0531, 'kl_v': 0.0902, 'kl': 0.0902, 'l1': 0.2053, 'ssim': 0.278, 'wdur': 0.1909, 'abs_word_dur_error': 52.0371, 'abs_sent_dur_error': 0.5183, 'fr': 0.8161, 'pcr': 0.8545, 'dfr': 0.9725, 'pe_perceptual_layer_0': 0.4042, 'pe_perceptual_layer_1': 1.0566, 'pe_perceptual_layer_2': 1.1816, 'pe_perceptual_layer_3': 0.1964, 'pe_perceptual_layer_4': 0.259, 'pe_perceptual_layer_5': 1.1686, 'pe_perceptual_layer_6': 1.3069, 'pe_perceptual_layer_7': 1.1848, 'pe_perceptual_layer_8': 0.8144, 'pe_perceptual_layer_9': 0.1376} +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. +| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. +| Validation results@80000: {'total_loss': 56.0145, 'kl_v': 0.0898, 'kl': 0.0898, 'l1': 0.2062, 'ssim': 0.2827, 'wdur': 0.1882, 'abs_word_dur_error': 51.9908, 'abs_sent_dur_error': 0.5212, 'fr': 0.817, 'pcr': 0.8563, 'dfr': 0.9724, 'pe_perceptual_layer_0': 0.4066, 'pe_perceptual_layer_1': 1.0622, 'pe_perceptual_layer_2': 1.1891, 'pe_perceptual_layer_3': 0.1984, 'pe_perceptual_layer_4': 0.2603, 'pe_perceptual_layer_5': 1.1721, 'pe_perceptual_layer_6': 1.3123, 'pe_perceptual_layer_7': 1.1883, 'pe_perceptual_layer_8': 0.8161, 'pe_perceptual_layer_9': 0.1379, 'f0_dtw': 26.1341} +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. +| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. +| Validation results@82000: {'total_loss': 56.5429, 'kl_v': 0.0886, 'kl': 0.0886, 'l1': 0.2051, 'ssim': 0.2806, 'wdur': 0.1882, 'abs_word_dur_error': 52.4963, 'abs_sent_dur_error': 0.5495, 'fr': 0.8176, 'pcr': 0.8558, 'dfr': 0.9725, 'pe_perceptual_layer_0': 0.4059, 'pe_perceptual_layer_1': 1.0605, 'pe_perceptual_layer_2': 1.1865, 'pe_perceptual_layer_3': 0.1972, 'pe_perceptual_layer_4': 0.26, 'pe_perceptual_layer_5': 1.1722, 'pe_perceptual_layer_6': 1.3124, 'pe_perceptual_layer_7': 1.1883, 'pe_perceptual_layer_8': 0.8197, 'pe_perceptual_layer_9': 0.1385} +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. +| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. 
+| Validation results@84000: {'total_loss': 56.8526, 'kl_v': 0.0864, 'kl': 0.0864, 'l1': 0.2063, 'ssim': 0.2823, 'wdur': 0.1883, 'abs_word_dur_error': 52.8247, 'abs_sent_dur_error': 0.5327, 'fr': 0.8172, 'pcr': 0.8558, 'dfr': 0.9723, 'pe_perceptual_layer_0': 0.4028, 'pe_perceptual_layer_1': 1.052, 'pe_perceptual_layer_2': 1.1817, 'pe_perceptual_layer_3': 0.1965, 'pe_perceptual_layer_4': 0.2594, 'pe_perceptual_layer_5': 1.1698, 'pe_perceptual_layer_6': 1.307, 'pe_perceptual_layer_7': 1.1778, 'pe_perceptual_layer_8': 0.8159, 'pe_perceptual_layer_9': 0.1395} +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. +| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. +| Validation results@86000: {'total_loss': 56.5707, 'kl_v': 0.086, 'kl': 0.086, 'l1': 0.2068, 'ssim': 0.283, 'wdur': 0.1885, 'abs_word_dur_error': 52.51, 'abs_sent_dur_error': 0.5676, 'fr': 0.8157, 'pcr': 0.8547, 'dfr': 0.9724, 'pe_perceptual_layer_0': 0.4124, 'pe_perceptual_layer_1': 1.0735, 'pe_perceptual_layer_2': 1.1974, 'pe_perceptual_layer_3': 0.1996, 'pe_perceptual_layer_4': 0.2632, 'pe_perceptual_layer_5': 1.1813, 'pe_perceptual_layer_6': 1.3245, 'pe_perceptual_layer_7': 1.2048, 'pe_perceptual_layer_8': 0.8264, 'pe_perceptual_layer_9': 0.1381} +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. +| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. +| Validation results@88000: {'total_loss': 56.346, 'kl_v': 0.0794, 'kl': 0.0794, 'l1': 0.2079, 'ssim': 0.2845, 'wdur': 0.1878, 'abs_word_dur_error': 52.3298, 'abs_sent_dur_error': 0.5322, 'fr': 0.8172, 'pcr': 0.8553, 'dfr': 0.9725, 'pe_perceptual_layer_0': 0.4174, 'pe_perceptual_layer_1': 1.0871, 'pe_perceptual_layer_2': 1.2102, 'pe_perceptual_layer_3': 0.2018, 'pe_perceptual_layer_4': 0.2669, 'pe_perceptual_layer_5': 1.1894, 'pe_perceptual_layer_6': 1.3318, 'pe_perceptual_layer_7': 1.2117, 'pe_perceptual_layer_8': 0.8346, 'pe_perceptual_layer_9': 0.1392} +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. +| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. +| Validation results@90000: {'total_loss': 56.7418, 'kl_v': 0.0842, 'kl': 0.0842, 'l1': 0.2073, 'ssim': 0.2841, 'wdur': 0.1908, 'abs_word_dur_error': 52.7022, 'abs_sent_dur_error': 0.5458, 'fr': 0.8159, 'pcr': 0.855, 'dfr': 0.9724, 'pe_perceptual_layer_0': 0.4157, 'pe_perceptual_layer_1': 1.0805, 'pe_perceptual_layer_2': 1.2059, 'pe_perceptual_layer_3': 0.201, 'pe_perceptual_layer_4': 0.2652, 'pe_perceptual_layer_5': 1.1854, 'pe_perceptual_layer_6': 1.3275, 'pe_perceptual_layer_7': 1.2073, 'pe_perceptual_layer_8': 0.8294, 'pe_perceptual_layer_9': 0.1389, 'f0_dtw': 27.6032} +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. +| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. 
+| Validation results@92000: {'total_loss': 57.499, 'kl_v': 0.0809, 'kl': 0.0809, 'l1': 0.2089, 'ssim': 0.2854, 'wdur': 0.1912, 'abs_word_dur_error': 53.4173, 'abs_sent_dur_error': 0.5906, 'fr': 0.8166, 'pcr': 0.8548, 'dfr': 0.9725, 'pe_perceptual_layer_0': 0.4163, 'pe_perceptual_layer_1': 1.0815, 'pe_perceptual_layer_2': 1.2076, 'pe_perceptual_layer_3': 0.2006, 'pe_perceptual_layer_4': 0.2656, 'pe_perceptual_layer_5': 1.1856, 'pe_perceptual_layer_6': 1.3294, 'pe_perceptual_layer_7': 1.208, 'pe_perceptual_layer_8': 0.8313, 'pe_perceptual_layer_9': 0.1391} +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. +| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. +| Validation results@94000: {'total_loss': 55.4116, 'kl_v': 0.0763, 'kl': 0.0763, 'l1': 0.2084, 'ssim': 0.2857, 'wdur': 0.1869, 'abs_word_dur_error': 51.4521, 'abs_sent_dur_error': 0.4834, 'fr': 0.8157, 'pcr': 0.8543, 'dfr': 0.9723, 'pe_perceptual_layer_0': 0.4199, 'pe_perceptual_layer_1': 1.0894, 'pe_perceptual_layer_2': 1.2128, 'pe_perceptual_layer_3': 0.2021, 'pe_perceptual_layer_4': 0.2671, 'pe_perceptual_layer_5': 1.1904, 'pe_perceptual_layer_6': 1.3327, 'pe_perceptual_layer_7': 1.2109, 'pe_perceptual_layer_8': 0.8344, 'pe_perceptual_layer_9': 0.1399} +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. +| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. +| Validation results@96000: {'total_loss': 56.7465, 'kl_v': 0.0755, 'kl': 0.0755, 'l1': 0.211, 'ssim': 0.2882, 'wdur': 0.19, 'abs_word_dur_error': 52.7174, 'abs_sent_dur_error': 0.5428, 'fr': 0.8175, 'pcr': 0.8561, 'dfr': 0.9725, 'pe_perceptual_layer_0': 0.424, 'pe_perceptual_layer_1': 1.0997, 'pe_perceptual_layer_2': 1.2223, 'pe_perceptual_layer_3': 0.2038, 'pe_perceptual_layer_4': 0.2695, 'pe_perceptual_layer_5': 1.1979, 'pe_perceptual_layer_6': 1.3401, 'pe_perceptual_layer_7': 1.2214, 'pe_perceptual_layer_8': 0.8426, 'pe_perceptual_layer_9': 0.1407} +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. +| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. +| Validation results@98000: {'total_loss': 55.76, 'kl_v': 0.0739, 'kl': 0.0739, 'l1': 0.209, 'ssim': 0.2861, 'wdur': 0.1864, 'abs_word_dur_error': 51.7869, 'abs_sent_dur_error': 0.4991, 'fr': 0.8166, 'pcr': 0.8557, 'dfr': 0.9724, 'pe_perceptual_layer_0': 0.4188, 'pe_perceptual_layer_1': 1.0895, 'pe_perceptual_layer_2': 1.2143, 'pe_perceptual_layer_3': 0.2019, 'pe_perceptual_layer_4': 0.2678, 'pe_perceptual_layer_5': 1.1931, 'pe_perceptual_layer_6': 1.3347, 'pe_perceptual_layer_7': 1.2097, 'pe_perceptual_layer_8': 0.8378, 'pe_perceptual_layer_9': 0.1409} +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. +| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. 
+| Validation results@100000: {'total_loss': 55.9355, 'kl_v': 0.0671, 'kl': 0.0671, 'l1': 0.2091, 'ssim': 0.2876, 'wdur': 0.1872, 'abs_word_dur_error': 51.9592, 'abs_sent_dur_error': 0.5153, 'fr': 0.816, 'pcr': 0.8546, 'dfr': 0.9724, 'pe_perceptual_layer_0': 0.428, 'pe_perceptual_layer_1': 1.1101, 'pe_perceptual_layer_2': 1.2326, 'pe_perceptual_layer_3': 0.2056, 'pe_perceptual_layer_4': 0.2724, 'pe_perceptual_layer_5': 1.2058, 'pe_perceptual_layer_6': 1.3519, 'pe_perceptual_layer_7': 1.2372, 'pe_perceptual_layer_8': 0.8502, 'pe_perceptual_layer_9': 0.1403, 'f0_dtw': 27.8546} +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. +| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. +| Validation results@102000: {'total_loss': 55.9368, 'kl_v': 0.0744, 'kl': 0.0744, 'l1': 0.209, 'ssim': 0.2876, 'wdur': 0.1867, 'abs_word_dur_error': 51.9773, 'abs_sent_dur_error': 0.4834, 'fr': 0.8163, 'pcr': 0.8551, 'dfr': 0.9725, 'pe_perceptual_layer_0': 0.4234, 'pe_perceptual_layer_1': 1.0988, 'pe_perceptual_layer_2': 1.222, 'pe_perceptual_layer_3': 0.2033, 'pe_perceptual_layer_4': 0.2696, 'pe_perceptual_layer_5': 1.1988, 'pe_perceptual_layer_6': 1.3428, 'pe_perceptual_layer_7': 1.2249, 'pe_perceptual_layer_8': 0.8436, 'pe_perceptual_layer_9': 0.1402} +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. +| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. +| Validation results@104000: {'total_loss': 56.3189, 'kl_v': 0.0692, 'kl': 0.0692, 'l1': 0.2103, 'ssim': 0.2881, 'wdur': 0.1895, 'abs_word_dur_error': 52.3444, 'abs_sent_dur_error': 0.5075, 'fr': 0.8147, 'pcr': 0.8538, 'dfr': 0.9723, 'pe_perceptual_layer_0': 0.4253, 'pe_perceptual_layer_1': 1.1029, 'pe_perceptual_layer_2': 1.227, 'pe_perceptual_layer_3': 0.2044, 'pe_perceptual_layer_4': 0.2709, 'pe_perceptual_layer_5': 1.2027, 'pe_perceptual_layer_6': 1.3484, 'pe_perceptual_layer_7': 1.2313, 'pe_perceptual_layer_8': 0.8464, 'pe_perceptual_layer_9': 0.1397} +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. +| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. +| Validation results@106000: {'total_loss': 55.8675, 'kl_v': 0.0668, 'kl': 0.0668, 'l1': 0.2099, 'ssim': 0.2893, 'wdur': 0.1886, 'abs_word_dur_error': 51.8878, 'abs_sent_dur_error': 0.5152, 'fr': 0.8159, 'pcr': 0.8547, 'dfr': 0.9724, 'pe_perceptual_layer_0': 0.4258, 'pe_perceptual_layer_1': 1.1058, 'pe_perceptual_layer_2': 1.2303, 'pe_perceptual_layer_3': 0.205, 'pe_perceptual_layer_4': 0.2716, 'pe_perceptual_layer_5': 1.2048, 'pe_perceptual_layer_6': 1.3501, 'pe_perceptual_layer_7': 1.2296, 'pe_perceptual_layer_8': 0.8494, 'pe_perceptual_layer_9': 0.1412} +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. +| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. 
+| Validation results@108000: {'total_loss': 56.0412, 'kl_v': 0.0656, 'kl': 0.0656, 'l1': 0.2118, 'ssim': 0.2918, 'wdur': 0.1889, 'abs_word_dur_error': 52.0637, 'abs_sent_dur_error': 0.511, 'fr': 0.8158, 'pcr': 0.8545, 'dfr': 0.9724, 'pe_perceptual_layer_0': 0.4308, 'pe_perceptual_layer_1': 1.1151, 'pe_perceptual_layer_2': 1.2365, 'pe_perceptual_layer_3': 0.2059, 'pe_perceptual_layer_4': 0.2737, 'pe_perceptual_layer_5': 1.2092, 'pe_perceptual_layer_6': 1.3549, 'pe_perceptual_layer_7': 1.2389, 'pe_perceptual_layer_8': 0.8527, 'pe_perceptual_layer_9': 0.1415} +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. +| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. +| Validation results@110000: {'total_loss': 56.4355, 'kl_v': 0.0646, 'kl': 0.0646, 'l1': 0.2116, 'ssim': 0.2898, 'wdur': 0.1901, 'abs_word_dur_error': 52.4427, 'abs_sent_dur_error': 0.5277, 'fr': 0.8166, 'pcr': 0.8555, 'dfr': 0.9724, 'pe_perceptual_layer_0': 0.4303, 'pe_perceptual_layer_1': 1.1157, 'pe_perceptual_layer_2': 1.2385, 'pe_perceptual_layer_3': 0.2067, 'pe_perceptual_layer_4': 0.2737, 'pe_perceptual_layer_5': 1.2116, 'pe_perceptual_layer_6': 1.3574, 'pe_perceptual_layer_7': 1.2417, 'pe_perceptual_layer_8': 0.8558, 'pe_perceptual_layer_9': 0.1414, 'f0_dtw': 27.4981} +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. +| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. +| Validation results@112000: {'total_loss': 56.509, 'kl_v': 0.0643, 'kl': 0.0643, 'l1': 0.2119, 'ssim': 0.2896, 'wdur': 0.1913, 'abs_word_dur_error': 52.5132, 'abs_sent_dur_error': 0.5295, 'fr': 0.8167, 'pcr': 0.8559, 'dfr': 0.9723, 'pe_perceptual_layer_0': 0.4301, 'pe_perceptual_layer_1': 1.1174, 'pe_perceptual_layer_2': 1.2397, 'pe_perceptual_layer_3': 0.2063, 'pe_perceptual_layer_4': 0.274, 'pe_perceptual_layer_5': 1.2137, 'pe_perceptual_layer_6': 1.3582, 'pe_perceptual_layer_7': 1.2398, 'pe_perceptual_layer_8': 0.8561, 'pe_perceptual_layer_9': 0.1418} +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. +| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. +| Validation results@114000: {'total_loss': 56.2784, 'kl_v': 0.0592, 'kl': 0.0592, 'l1': 0.2125, 'ssim': 0.2924, 'wdur': 0.1885, 'abs_word_dur_error': 52.3063, 'abs_sent_dur_error': 0.5167, 'fr': 0.8159, 'pcr': 0.8553, 'dfr': 0.9723, 'pe_perceptual_layer_0': 0.4369, 'pe_perceptual_layer_1': 1.1281, 'pe_perceptual_layer_2': 1.2499, 'pe_perceptual_layer_3': 0.2079, 'pe_perceptual_layer_4': 0.2762, 'pe_perceptual_layer_5': 1.2171, 'pe_perceptual_layer_6': 1.3648, 'pe_perceptual_layer_7': 1.2497, 'pe_perceptual_layer_8': 0.8593, 'pe_perceptual_layer_9': 0.1412} +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. +| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. 
+| Validation results@116000: {'total_loss': 56.3646, 'kl_v': 0.0574, 'kl': 0.0574, 'l1': 0.2125, 'ssim': 0.2934, 'wdur': 0.1877, 'abs_word_dur_error': 52.3947, 'abs_sent_dur_error': 0.5201, 'fr': 0.8154, 'pcr': 0.8538, 'dfr': 0.9724, 'pe_perceptual_layer_0': 0.4387, 'pe_perceptual_layer_1': 1.1335, 'pe_perceptual_layer_2': 1.2542, 'pe_perceptual_layer_3': 0.2087, 'pe_perceptual_layer_4': 0.278, 'pe_perceptual_layer_5': 1.2249, 'pe_perceptual_layer_6': 1.3719, 'pe_perceptual_layer_7': 1.2566, 'pe_perceptual_layer_8': 0.8653, 'pe_perceptual_layer_9': 0.1414} +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. +| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. +| Validation results@118000: {'total_loss': 56.6008, 'kl_v': 0.0603, 'kl': 0.0603, 'l1': 0.2127, 'ssim': 0.2915, 'wdur': 0.1903, 'abs_word_dur_error': 52.6161, 'abs_sent_dur_error': 0.525, 'fr': 0.8168, 'pcr': 0.8555, 'dfr': 0.9724, 'pe_perceptual_layer_0': 0.4365, 'pe_perceptual_layer_1': 1.1283, 'pe_perceptual_layer_2': 1.2466, 'pe_perceptual_layer_3': 0.2072, 'pe_perceptual_layer_4': 0.2754, 'pe_perceptual_layer_5': 1.2162, 'pe_perceptual_layer_6': 1.3659, 'pe_perceptual_layer_7': 1.2549, 'pe_perceptual_layer_8': 0.8588, 'pe_perceptual_layer_9': 0.1404} +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. +| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. +| Validation results@120000: {'total_loss': 56.3565, 'kl_v': 0.0589, 'kl': 0.0589, 'l1': 0.2115, 'ssim': 0.2916, 'wdur': 0.1883, 'abs_word_dur_error': 52.3937, 'abs_sent_dur_error': 0.5078, 'fr': 0.8172, 'pcr': 0.8561, 'dfr': 0.9724, 'pe_perceptual_layer_0': 0.4357, 'pe_perceptual_layer_1': 1.1285, 'pe_perceptual_layer_2': 1.2494, 'pe_perceptual_layer_3': 0.208, 'pe_perceptual_layer_4': 0.2761, 'pe_perceptual_layer_5': 1.2188, 'pe_perceptual_layer_6': 1.3661, 'pe_perceptual_layer_7': 1.2503, 'pe_perceptual_layer_8': 0.861, 'pe_perceptual_layer_9': 0.1418, 'f0_dtw': 27.3801} +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. +| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. +| Validation results@122000: {'total_loss': 56.4081, 'kl_v': 0.0585, 'kl': 0.0585, 'l1': 0.2115, 'ssim': 0.2913, 'wdur': 0.1896, 'abs_word_dur_error': 52.4449, 'abs_sent_dur_error': 0.5086, 'fr': 0.8167, 'pcr': 0.8562, 'dfr': 0.9725, 'pe_perceptual_layer_0': 0.4382, 'pe_perceptual_layer_1': 1.1331, 'pe_perceptual_layer_2': 1.2529, 'pe_perceptual_layer_3': 0.2087, 'pe_perceptual_layer_4': 0.2781, 'pe_perceptual_layer_5': 1.2229, 'pe_perceptual_layer_6': 1.3725, 'pe_perceptual_layer_7': 1.2604, 'pe_perceptual_layer_8': 0.8659, 'pe_perceptual_layer_9': 0.1416} +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. +| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. 
+| Validation results@124000: {'total_loss': 57.6408, 'kl_v': 0.0598, 'kl': 0.0598, 'l1': 0.2117, 'ssim': 0.2919, 'wdur': 0.1945, 'abs_word_dur_error': 53.6179, 'abs_sent_dur_error': 0.5617, 'fr': 0.8161, 'pcr': 0.855, 'dfr': 0.9725, 'pe_perceptual_layer_0': 0.437, 'pe_perceptual_layer_1': 1.1296, 'pe_perceptual_layer_2': 1.2498, 'pe_perceptual_layer_3': 0.2082, 'pe_perceptual_layer_4': 0.2766, 'pe_perceptual_layer_5': 1.2199, 'pe_perceptual_layer_6': 1.3678, 'pe_perceptual_layer_7': 1.2566, 'pe_perceptual_layer_8': 0.8616, 'pe_perceptual_layer_9': 0.1412} +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. +| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. +| Validation results@126000: {'total_loss': 57.2581, 'kl_v': 0.0546, 'kl': 0.0546, 'l1': 0.2131, 'ssim': 0.2931, 'wdur': 0.1926, 'abs_word_dur_error': 53.2536, 'abs_sent_dur_error': 0.5532, 'fr': 0.8157, 'pcr': 0.8552, 'dfr': 0.9724, 'pe_perceptual_layer_0': 0.4397, 'pe_perceptual_layer_1': 1.1362, 'pe_perceptual_layer_2': 1.2559, 'pe_perceptual_layer_3': 0.2092, 'pe_perceptual_layer_4': 0.2779, 'pe_perceptual_layer_5': 1.2232, 'pe_perceptual_layer_6': 1.3721, 'pe_perceptual_layer_7': 1.2586, 'pe_perceptual_layer_8': 0.866, 'pe_perceptual_layer_9': 0.142} +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. +| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. +| Validation results@128000: {'total_loss': 56.2201, 'kl_v': 0.0608, 'kl': 0.0608, 'l1': 0.2136, 'ssim': 0.2927, 'wdur': 0.1893, 'abs_word_dur_error': 52.2509, 'abs_sent_dur_error': 0.5086, 'fr': 0.8161, 'pcr': 0.8549, 'dfr': 0.9724, 'pe_perceptual_layer_0': 0.4368, 'pe_perceptual_layer_1': 1.1279, 'pe_perceptual_layer_2': 1.2489, 'pe_perceptual_layer_3': 0.2078, 'pe_perceptual_layer_4': 0.2763, 'pe_perceptual_layer_5': 1.2216, 'pe_perceptual_layer_6': 1.3691, 'pe_perceptual_layer_7': 1.2546, 'pe_perceptual_layer_8': 0.8626, 'pe_perceptual_layer_9': 0.1421} +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. +| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. +| Validation results@130000: {'total_loss': 56.9267, 'kl_v': 0.0584, 'kl': 0.0584, 'l1': 0.2119, 'ssim': 0.2918, 'wdur': 0.1909, 'abs_word_dur_error': 52.9235, 'abs_sent_dur_error': 0.5478, 'fr': 0.8166, 'pcr': 0.855, 'dfr': 0.9724, 'pe_perceptual_layer_0': 0.4398, 'pe_perceptual_layer_1': 1.1346, 'pe_perceptual_layer_2': 1.2549, 'pe_perceptual_layer_3': 0.2089, 'pe_perceptual_layer_4': 0.2782, 'pe_perceptual_layer_5': 1.2215, 'pe_perceptual_layer_6': 1.3726, 'pe_perceptual_layer_7': 1.2604, 'pe_perceptual_layer_8': 0.8659, 'pe_perceptual_layer_9': 0.1413, 'f0_dtw': 26.7893} +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. +| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. 
+| Validation results@132000: {'total_loss': 56.9549, 'kl_v': 0.0509, 'kl': 0.0509, 'l1': 0.2144, 'ssim': 0.2955, 'wdur': 0.1921, 'abs_word_dur_error': 52.9572, 'abs_sent_dur_error': 0.5487, 'fr': 0.8172, 'pcr': 0.8557, 'dfr': 0.9724, 'pe_perceptual_layer_0': 0.4447, 'pe_perceptual_layer_1': 1.1472, 'pe_perceptual_layer_2': 1.2647, 'pe_perceptual_layer_3': 0.2104, 'pe_perceptual_layer_4': 0.2807, 'pe_perceptual_layer_5': 1.2298, 'pe_perceptual_layer_6': 1.38, 'pe_perceptual_layer_7': 1.267, 'pe_perceptual_layer_8': 0.8727, 'pe_perceptual_layer_9': 0.1426} +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. +| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. +| Validation results@134000: {'total_loss': 57.4069, 'kl_v': 0.0526, 'kl': 0.0526, 'l1': 0.2125, 'ssim': 0.2928, 'wdur': 0.1937, 'abs_word_dur_error': 53.3938, 'abs_sent_dur_error': 0.5645, 'fr': 0.8166, 'pcr': 0.8554, 'dfr': 0.9724, 'pe_perceptual_layer_0': 0.4412, 'pe_perceptual_layer_1': 1.1414, 'pe_perceptual_layer_2': 1.2619, 'pe_perceptual_layer_3': 0.2106, 'pe_perceptual_layer_4': 0.28, 'pe_perceptual_layer_5': 1.2272, 'pe_perceptual_layer_6': 1.3772, 'pe_perceptual_layer_7': 1.2651, 'pe_perceptual_layer_8': 0.8702, 'pe_perceptual_layer_9': 0.1426} +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. +| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. +| Validation results@136000: {'total_loss': 56.6186, 'kl_v': 0.0499, 'kl': 0.0499, 'l1': 0.2148, 'ssim': 0.2938, 'wdur': 0.1908, 'abs_word_dur_error': 52.6397, 'abs_sent_dur_error': 0.5351, 'fr': 0.8168, 'pcr': 0.8553, 'dfr': 0.9724, 'pe_perceptual_layer_0': 0.4448, 'pe_perceptual_layer_1': 1.148, 'pe_perceptual_layer_2': 1.2671, 'pe_perceptual_layer_3': 0.2112, 'pe_perceptual_layer_4': 0.2816, 'pe_perceptual_layer_5': 1.2325, 'pe_perceptual_layer_6': 1.3826, 'pe_perceptual_layer_7': 1.2709, 'pe_perceptual_layer_8': 0.8752, 'pe_perceptual_layer_9': 0.1427} +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. +| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. +| Validation results@138000: {'total_loss': 56.623, 'kl_v': 0.0492, 'kl': 0.0492, 'l1': 0.2142, 'ssim': 0.2939, 'wdur': 0.1912, 'abs_word_dur_error': 52.6447, 'abs_sent_dur_error': 0.5376, 'fr': 0.8159, 'pcr': 0.8548, 'dfr': 0.9723, 'pe_perceptual_layer_0': 0.4437, 'pe_perceptual_layer_1': 1.1437, 'pe_perceptual_layer_2': 1.2623, 'pe_perceptual_layer_3': 0.2101, 'pe_perceptual_layer_4': 0.2802, 'pe_perceptual_layer_5': 1.2295, 'pe_perceptual_layer_6': 1.379, 'pe_perceptual_layer_7': 1.2681, 'pe_perceptual_layer_8': 0.8729, 'pe_perceptual_layer_9': 0.1423} +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. +| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. 
+| Validation results@140000: {'total_loss': 56.4233, 'kl_v': 0.0475, 'kl': 0.0475, 'l1': 0.2147, 'ssim': 0.2952, 'wdur': 0.1897, 'abs_word_dur_error': 52.4795, 'abs_sent_dur_error': 0.507, 'fr': 0.8158, 'pcr': 0.8541, 'dfr': 0.9724, 'pe_perceptual_layer_0': 0.4481, 'pe_perceptual_layer_1': 1.155, 'pe_perceptual_layer_2': 1.2736, 'pe_perceptual_layer_3': 0.2125, 'pe_perceptual_layer_4': 0.2829, 'pe_perceptual_layer_5': 1.2343, 'pe_perceptual_layer_6': 1.3863, 'pe_perceptual_layer_7': 1.2765, 'pe_perceptual_layer_8': 0.8767, 'pe_perceptual_layer_9': 0.142, 'f0_dtw': 26.728} +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. +| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. +| Validation results@142000: {'total_loss': 56.6032, 'kl_v': 0.0447, 'kl': 0.0447, 'l1': 0.2143, 'ssim': 0.2946, 'wdur': 0.1897, 'abs_word_dur_error': 52.6567, 'abs_sent_dur_error': 0.5165, 'fr': 0.8153, 'pcr': 0.8544, 'dfr': 0.9724, 'pe_perceptual_layer_0': 0.448, 'pe_perceptual_layer_1': 1.1564, 'pe_perceptual_layer_2': 1.2737, 'pe_perceptual_layer_3': 0.2122, 'pe_perceptual_layer_4': 0.283, 'pe_perceptual_layer_5': 1.2361, 'pe_perceptual_layer_6': 1.3871, 'pe_perceptual_layer_7': 1.2767, 'pe_perceptual_layer_8': 0.8788, 'pe_perceptual_layer_9': 0.1434} +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. +| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. +| Validation results@144000: {'total_loss': 56.9392, 'kl_v': 0.0466, 'kl': 0.0466, 'l1': 0.2141, 'ssim': 0.2948, 'wdur': 0.1926, 'abs_word_dur_error': 52.9583, 'abs_sent_dur_error': 0.5444, 'fr': 0.8152, 'pcr': 0.8541, 'dfr': 0.9724, 'pe_perceptual_layer_0': 0.4483, 'pe_perceptual_layer_1': 1.1552, 'pe_perceptual_layer_2': 1.2711, 'pe_perceptual_layer_3': 0.2118, 'pe_perceptual_layer_4': 0.2824, 'pe_perceptual_layer_5': 1.2364, 'pe_perceptual_layer_6': 1.3882, 'pe_perceptual_layer_7': 1.2819, 'pe_perceptual_layer_8': 0.8804, 'pe_perceptual_layer_9': 0.1424} +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. +| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. +| Validation results@146000: {'total_loss': 56.5997, 'kl_v': 0.0445, 'kl': 0.0445, 'l1': 0.2158, 'ssim': 0.2962, 'wdur': 0.1909, 'abs_word_dur_error': 52.6378, 'abs_sent_dur_error': 0.5282, 'fr': 0.8152, 'pcr': 0.8543, 'dfr': 0.9723, 'pe_perceptual_layer_0': 0.4479, 'pe_perceptual_layer_1': 1.1541, 'pe_perceptual_layer_2': 1.2714, 'pe_perceptual_layer_3': 0.2117, 'pe_perceptual_layer_4': 0.2823, 'pe_perceptual_layer_5': 1.2375, 'pe_perceptual_layer_6': 1.3881, 'pe_perceptual_layer_7': 1.275, 'pe_perceptual_layer_8': 0.8796, 'pe_perceptual_layer_9': 0.1432} +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. +| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. 
+| Validation results@148000: {'total_loss': 56.2954, 'kl_v': 0.0418, 'kl': 0.0418, 'l1': 0.2148, 'ssim': 0.2959, 'wdur': 0.1898, 'abs_word_dur_error': 52.3577, 'abs_sent_dur_error': 0.5103, 'fr': 0.8161, 'pcr': 0.8548, 'dfr': 0.9724, 'pe_perceptual_layer_0': 0.4518, 'pe_perceptual_layer_1': 1.165, 'pe_perceptual_layer_2': 1.2814, 'pe_perceptual_layer_3': 0.2136, 'pe_perceptual_layer_4': 0.2852, 'pe_perceptual_layer_5': 1.242, 'pe_perceptual_layer_6': 1.3937, 'pe_perceptual_layer_7': 1.2858, 'pe_perceptual_layer_8': 0.8856, 'pe_perceptual_layer_9': 0.1438} +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. +| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. +| Validation results@150000: {'total_loss': 57.0439, 'kl_v': 0.0438, 'kl': 0.0438, 'l1': 0.2154, 'ssim': 0.2969, 'wdur': 0.1902, 'abs_word_dur_error': 53.0723, 'abs_sent_dur_error': 0.5372, 'fr': 0.8162, 'pcr': 0.8558, 'dfr': 0.9723, 'pe_perceptual_layer_0': 0.4506, 'pe_perceptual_layer_1': 1.1593, 'pe_perceptual_layer_2': 1.2762, 'pe_perceptual_layer_3': 0.2124, 'pe_perceptual_layer_4': 0.2839, 'pe_perceptual_layer_5': 1.2405, 'pe_perceptual_layer_6': 1.39, 'pe_perceptual_layer_7': 1.2798, 'pe_perceptual_layer_8': 0.8807, 'pe_perceptual_layer_9': 0.143, 'f0_dtw': 27.5799} +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. +| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. +| Validation results@152000: {'total_loss': 56.3909, 'kl_v': 0.043, 'kl': 0.043, 'l1': 0.2144, 'ssim': 0.2959, 'wdur': 0.1887, 'abs_word_dur_error': 52.4497, 'abs_sent_dur_error': 0.5134, 'fr': 0.8158, 'pcr': 0.8545, 'dfr': 0.9724, 'pe_perceptual_layer_0': 0.4513, 'pe_perceptual_layer_1': 1.1622, 'pe_perceptual_layer_2': 1.2787, 'pe_perceptual_layer_3': 0.2129, 'pe_perceptual_layer_4': 0.2839, 'pe_perceptual_layer_5': 1.2403, 'pe_perceptual_layer_6': 1.3937, 'pe_perceptual_layer_7': 1.2851, 'pe_perceptual_layer_8': 0.8831, 'pe_perceptual_layer_9': 0.1432} +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. +| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. +| Validation results@154000: {'total_loss': 56.4142, 'kl_v': 0.0412, 'kl': 0.0412, 'l1': 0.2153, 'ssim': 0.2961, 'wdur': 0.191, 'abs_word_dur_error': 52.4787, 'abs_sent_dur_error': 0.5058, 'fr': 0.8167, 'pcr': 0.8559, 'dfr': 0.9724, 'pe_perceptual_layer_0': 0.4524, 'pe_perceptual_layer_1': 1.1642, 'pe_perceptual_layer_2': 1.2805, 'pe_perceptual_layer_3': 0.2133, 'pe_perceptual_layer_4': 0.2845, 'pe_perceptual_layer_5': 1.2421, 'pe_perceptual_layer_6': 1.3933, 'pe_perceptual_layer_7': 1.2841, 'pe_perceptual_layer_8': 0.8831, 'pe_perceptual_layer_9': 0.1434} +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. +| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. 
+| Validation results@156000: {'total_loss': 56.858, 'kl_v': 0.0384, 'kl': 0.0384, 'l1': 0.2156, 'ssim': 0.2972, 'wdur': 0.1915, 'abs_word_dur_error': 52.9069, 'abs_sent_dur_error': 0.5258, 'fr': 0.8164, 'pcr': 0.8554, 'dfr': 0.9724, 'pe_perceptual_layer_0': 0.454, 'pe_perceptual_layer_1': 1.1676, 'pe_perceptual_layer_2': 1.2826, 'pe_perceptual_layer_3': 0.2138, 'pe_perceptual_layer_4': 0.2856, 'pe_perceptual_layer_5': 1.2458, 'pe_perceptual_layer_6': 1.3981, 'pe_perceptual_layer_7': 1.2894, 'pe_perceptual_layer_8': 0.8871, 'pe_perceptual_layer_9': 0.1431} +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. +| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. +| Validation results@158000: {'total_loss': 56.4681, 'kl_v': 0.0382, 'kl': 0.0382, 'l1': 0.2156, 'ssim': 0.2974, 'wdur': 0.1899, 'abs_word_dur_error': 52.5351, 'abs_sent_dur_error': 0.5127, 'fr': 0.8151, 'pcr': 0.8537, 'dfr': 0.9723, 'pe_perceptual_layer_0': 0.4537, 'pe_perceptual_layer_1': 1.1665, 'pe_perceptual_layer_2': 1.282, 'pe_perceptual_layer_3': 0.2131, 'pe_perceptual_layer_4': 0.2849, 'pe_perceptual_layer_5': 1.2434, 'pe_perceptual_layer_6': 1.3943, 'pe_perceptual_layer_7': 1.2835, 'pe_perceptual_layer_8': 0.8832, 'pe_perceptual_layer_9': 0.1435} +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. +| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. +| Validation results@160000: {'total_loss': 56.9731, 'kl_v': 0.0339, 'kl': 0.0339, 'l1': 0.2168, 'ssim': 0.2999, 'wdur': 0.1932, 'abs_word_dur_error': 53.032, 'abs_sent_dur_error': 0.5197, 'fr': 0.816, 'pcr': 0.8553, 'dfr': 0.9724, 'pe_perceptual_layer_0': 0.4597, 'pe_perceptual_layer_1': 1.1811, 'pe_perceptual_layer_2': 1.2955, 'pe_perceptual_layer_3': 0.2165, 'pe_perceptual_layer_4': 0.2888, 'pe_perceptual_layer_5': 1.2532, 'pe_perceptual_layer_6': 1.407, 'pe_perceptual_layer_7': 1.3017, 'pe_perceptual_layer_8': 0.8931, 'pe_perceptual_layer_9': 0.143, 'f0_dtw': 26.967} +| Training end.. +Epoch 0 ended. Steps: 160001. {'total_loss': 0.3957, 'kl_v': 0.0064, 'kl': 0.0064, 'l1': 0.1031, 'ssim': 0.1296, 'wdur': 0.0876, 'abs_word_dur_error': 27.5254, 'abs_sent_dur_error': 0.2028, 'fr': 0.8163, 'pcr': 0.8521, 'dfr': 0.9707, 'a': 0.3346, 'batch_size': 46.2205, 'r': 0.2251, 'f': 0.2227} diff --git a/text_to_speech/checkpoints/ljspeech/ps_adv_baseline/terminal_logs/log_20221212192440.txt b/text_to_speech/checkpoints/ljspeech/ps_adv_baseline/terminal_logs/log_20221212192440.txt new file mode 100644 index 0000000000000000000000000000000000000000..f1bf73b7480e5da5dceb55e226ca1b6f33e5906c --- /dev/null +++ b/text_to_speech/checkpoints/ljspeech/ps_adv_baseline/terminal_logs/log_20221212192440.txt @@ -0,0 +1,630 @@ +| Copied codes to checkpoints/1130_lj/ps_adv_baseline/codes/20221212192440. 
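The `model Arch:` dump that follows appears to be nothing more than PyTorch's `repr()` of the module tree, so the same nested `Conv1d(...)` / `LayerNorm(...)` listing can be reproduced for any `nn.Module`. A small illustrative sketch (the toy module is mine, not part of the repo):

```python
import torch.nn as nn

# Printing a module yields the same kind of tree as the PortaSpeech dump below.
toy = nn.Sequential(
    nn.Conv1d(192, 192, kernel_size=5, padding=2),
    nn.ReLU(),
    nn.LayerNorm(192),
    nn.Dropout(p=0.2),
)
print(toy)
# Sequential(
#   (0): Conv1d(192, 192, kernel_size=(5,), stride=(1,), padding=(2,))
#   (1): ReLU()
#   (2): LayerNorm((192,), eps=1e-05, elementwise_affine=True)
#   (3): Dropout(p=0.2, inplace=False)
# )
```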
+| model Arch: PortaSpeech( + (dur_predictor): DurationPredictor( + (conv): ModuleList( + (0): Sequential( + (0): Conv1d(192, 192, kernel_size=(5,), stride=(1,), padding=(2,)) + (1): ReLU() + (2): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (3): Dropout(p=0.2, inplace=False) + ) + (1): Sequential( + (0): Conv1d(192, 192, kernel_size=(5,), stride=(1,), padding=(2,)) + (1): ReLU() + (2): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (3): Dropout(p=0.2, inplace=False) + ) + (2): Sequential( + (0): Conv1d(192, 192, kernel_size=(5,), stride=(1,), padding=(2,)) + (1): ReLU() + (2): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (3): Dropout(p=0.2, inplace=False) + ) + ) + (linear): Sequential( + (0): Linear(in_features=192, out_features=1, bias=True) + (1): Softplus(beta=1, threshold=20) + ) + ) + (length_regulator): LengthRegulator() + (ph_encoder): RelTransformerEncoder( + (emb): Embedding(79, 192, padding_idx=0) + (pre): ConvReluNorm( + (conv_layers): ModuleList( + (0): Conv1d(192, 192, kernel_size=(5,), stride=(1,), padding=(2,)) + (1): Conv1d(192, 192, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): Conv1d(192, 192, kernel_size=(5,), stride=(1,), padding=(2,)) + ) + (norm_layers): ModuleList( + (0): LayerNorm() + (1): LayerNorm() + (2): LayerNorm() + ) + (relu_drop): Sequential( + (0): ReLU() + (1): Dropout(p=0, inplace=False) + ) + (proj): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + ) + (encoder): Encoder( + (drop): Dropout(p=0.0, inplace=False) + (attn_layers): ModuleList( + (0): MultiHeadAttention( + (conv_q): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_k): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_v): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_o): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + (1): MultiHeadAttention( + (conv_q): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_k): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_v): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_o): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + (2): MultiHeadAttention( + (conv_q): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_k): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_v): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_o): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + (3): MultiHeadAttention( + (conv_q): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_k): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_v): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_o): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + ) + (norm_layers_1): ModuleList( + (0): LayerNorm() + (1): LayerNorm() + (2): LayerNorm() + (3): LayerNorm() + ) + (ffn_layers): ModuleList( + (0): FFN( + (conv_1): Conv1d(192, 768, kernel_size=(5,), stride=(1,), padding=(2,)) + (conv_2): Conv1d(768, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + (1): FFN( + (conv_1): Conv1d(192, 768, kernel_size=(5,), stride=(1,), padding=(2,)) + (conv_2): Conv1d(768, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + (2): FFN( + (conv_1): Conv1d(192, 768, kernel_size=(5,), stride=(1,), padding=(2,)) + (conv_2): Conv1d(768, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + (3): FFN( + (conv_1): Conv1d(192, 768, 
kernel_size=(5,), stride=(1,), padding=(2,)) + (conv_2): Conv1d(768, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + ) + (norm_layers_2): ModuleList( + (0): LayerNorm() + (1): LayerNorm() + (2): LayerNorm() + (3): LayerNorm() + ) + (last_ln): LayerNorm() + ) + ) + (ph2word_encoder): RelTransformerEncoder( + (pre): ConvReluNorm( + (conv_layers): ModuleList( + (0): Conv1d(192, 192, kernel_size=(5,), stride=(1,), padding=(2,)) + (1): Conv1d(192, 192, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): Conv1d(192, 192, kernel_size=(5,), stride=(1,), padding=(2,)) + ) + (norm_layers): ModuleList( + (0): LayerNorm() + (1): LayerNorm() + (2): LayerNorm() + ) + (relu_drop): Sequential( + (0): ReLU() + (1): Dropout(p=0, inplace=False) + ) + (proj): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + ) + (encoder): Encoder( + (drop): Dropout(p=0.0, inplace=False) + (attn_layers): ModuleList( + (0): MultiHeadAttention( + (conv_q): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_k): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_v): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_o): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + (1): MultiHeadAttention( + (conv_q): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_k): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_v): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_o): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + (2): MultiHeadAttention( + (conv_q): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_k): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_v): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_o): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + (3): MultiHeadAttention( + (conv_q): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_k): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_v): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (conv_o): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + ) + (norm_layers_1): ModuleList( + (0): LayerNorm() + (1): LayerNorm() + (2): LayerNorm() + (3): LayerNorm() + ) + (ffn_layers): ModuleList( + (0): FFN( + (conv_1): Conv1d(192, 192, kernel_size=(5,), stride=(1,), padding=(2,)) + (conv_2): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + (1): FFN( + (conv_1): Conv1d(192, 192, kernel_size=(5,), stride=(1,), padding=(2,)) + (conv_2): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + (2): FFN( + (conv_1): Conv1d(192, 192, kernel_size=(5,), stride=(1,), padding=(2,)) + (conv_2): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + (3): FFN( + (conv_1): Conv1d(192, 192, kernel_size=(5,), stride=(1,), padding=(2,)) + (conv_2): Conv1d(192, 192, kernel_size=(1,), stride=(1,)) + (drop): Dropout(p=0.0, inplace=False) + ) + ) + (norm_layers_2): ModuleList( + (0): LayerNorm() + (1): LayerNorm() + (2): LayerNorm() + (3): LayerNorm() + ) + (last_ln): LayerNorm() + ) + ) + (sin_pos): SinusoidalPosEmb() + (enc_pos_proj): Linear(in_features=384, out_features=192, bias=True) + (dec_query_proj): Linear(in_features=384, out_features=192, bias=True) + (dec_res_proj): Linear(in_features=384, out_features=192, bias=True) + (attn): MultiheadAttention( + (out_proj): Linear(in_features=192, out_features=192, 
bias=False) + ) + (text_encoder_postnet): ConvBlocks( + (res_blocks): Sequential( + (0): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU() + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU() + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + (1): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU() + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU() + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + (2): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU() + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU() + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + ) + (last_norm): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (post_net1): Conv1d(192, 192, kernel_size=(3,), stride=(1,), padding=(1,)) + ) + (fvae): FVAE( + (g_pre_net): Sequential( + (0): Conv1d(192, 192, kernel_size=(8,), stride=(4,), padding=(2,)) + ) + (encoder): FVAEEncoder( + (pre_net): Sequential( + (0): Conv1d(80, 192, kernel_size=(8,), stride=(4,), padding=(2,)) + ) + (nn): ConditionalConvBlocks( + (res_blocks): Sequential( + (0): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU() + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU() + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + (1): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU() + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU() + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + (2): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU() + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): 
LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU() + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + (3): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU() + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU() + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + (4): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU() + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU() + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + (5): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU() + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU() + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + (6): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU() + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU() + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + (7): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU() + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU() + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + ) + (last_norm): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (post_net1): Conv1d(192, 192, kernel_size=(3,), stride=(1,), padding=(1,)) + (g_prenet): Conv1d(192, 192, kernel_size=(3,), stride=(1,), padding=(1,)) + ) + (out_proj): Conv1d(192, 32, kernel_size=(1,), stride=(1,)) + ) + (prior_flow): ResFlow( + (flows): ModuleList( + (0): CouplingLayer( + (pre): Conv1d(8, 64, kernel_size=(1,), stride=(1,)) + (enc): WN( + (in_layers): ModuleList( + (0): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + (1): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + (2): Conv1d(64, 128, 
kernel_size=(3,), stride=(1,), padding=(1,)) + (3): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + ) + (res_skip_layers): ModuleList( + (0): Conv1d(64, 128, kernel_size=(1,), stride=(1,)) + (1): Conv1d(64, 128, kernel_size=(1,), stride=(1,)) + (2): Conv1d(64, 128, kernel_size=(1,), stride=(1,)) + (3): Conv1d(64, 64, kernel_size=(1,), stride=(1,)) + ) + (drop): Dropout(p=0, inplace=False) + (cond_layer): Conv1d(192, 512, kernel_size=(1,), stride=(1,)) + ) + (post): Conv1d(64, 8, kernel_size=(1,), stride=(1,)) + ) + (1): FlipLayer() + (2): CouplingLayer( + (pre): Conv1d(8, 64, kernel_size=(1,), stride=(1,)) + (enc): WN( + (in_layers): ModuleList( + (0): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + (1): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + (2): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + (3): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + ) + (res_skip_layers): ModuleList( + (0): Conv1d(64, 128, kernel_size=(1,), stride=(1,)) + (1): Conv1d(64, 128, kernel_size=(1,), stride=(1,)) + (2): Conv1d(64, 128, kernel_size=(1,), stride=(1,)) + (3): Conv1d(64, 64, kernel_size=(1,), stride=(1,)) + ) + (drop): Dropout(p=0, inplace=False) + (cond_layer): Conv1d(192, 512, kernel_size=(1,), stride=(1,)) + ) + (post): Conv1d(64, 8, kernel_size=(1,), stride=(1,)) + ) + (3): FlipLayer() + (4): CouplingLayer( + (pre): Conv1d(8, 64, kernel_size=(1,), stride=(1,)) + (enc): WN( + (in_layers): ModuleList( + (0): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + (1): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + (2): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + (3): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + ) + (res_skip_layers): ModuleList( + (0): Conv1d(64, 128, kernel_size=(1,), stride=(1,)) + (1): Conv1d(64, 128, kernel_size=(1,), stride=(1,)) + (2): Conv1d(64, 128, kernel_size=(1,), stride=(1,)) + (3): Conv1d(64, 64, kernel_size=(1,), stride=(1,)) + ) + (drop): Dropout(p=0, inplace=False) + (cond_layer): Conv1d(192, 512, kernel_size=(1,), stride=(1,)) + ) + (post): Conv1d(64, 8, kernel_size=(1,), stride=(1,)) + ) + (5): FlipLayer() + (6): CouplingLayer( + (pre): Conv1d(8, 64, kernel_size=(1,), stride=(1,)) + (enc): WN( + (in_layers): ModuleList( + (0): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + (1): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + (2): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + (3): Conv1d(64, 128, kernel_size=(3,), stride=(1,), padding=(1,)) + ) + (res_skip_layers): ModuleList( + (0): Conv1d(64, 128, kernel_size=(1,), stride=(1,)) + (1): Conv1d(64, 128, kernel_size=(1,), stride=(1,)) + (2): Conv1d(64, 128, kernel_size=(1,), stride=(1,)) + (3): Conv1d(64, 64, kernel_size=(1,), stride=(1,)) + ) + (drop): Dropout(p=0, inplace=False) + (cond_layer): Conv1d(192, 512, kernel_size=(1,), stride=(1,)) + ) + (post): Conv1d(64, 8, kernel_size=(1,), stride=(1,)) + ) + (7): FlipLayer() + ) + ) + (decoder): FVAEDecoder( + (pre_net): Sequential( + (0): ConvTranspose1d(16, 192, kernel_size=(4,), stride=(4,)) + ) + (nn): ConditionalConvBlocks( + (res_blocks): Sequential( + (0): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU() + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), 
eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU() + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + (1): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU() + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU() + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + (2): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU() + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU() + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + (3): ResidualBlock( + (blocks): ModuleList( + (0): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU() + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + (1): Sequential( + (0): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (1): Conv1d(192, 384, kernel_size=(5,), stride=(1,), padding=(2,)) + (2): LambdaLayer() + (3): GELU() + (4): Conv1d(384, 192, kernel_size=(1,), stride=(1,)) + ) + ) + ) + ) + (last_norm): LayerNorm((192,), eps=1e-05, elementwise_affine=True) + (post_net1): Conv1d(192, 192, kernel_size=(3,), stride=(1,), padding=(1,)) + (g_prenet): Conv1d(192, 192, kernel_size=(3,), stride=(1,), padding=(1,)) + ) + (out_proj): Conv1d(192, 80, kernel_size=(1,), stride=(1,)) + ) + ) + (word_pos_proj): Linear(in_features=192, out_features=192, bias=True) +) +| model Trainable Parameters: 23.020M +| dur_predictor Trainable Parameters: 0.555M +| length_regulator Trainable Parameters: 0.000M +| ph_encoder Trainable Parameters: 4.753M +| ph2word_encoder Trainable Parameters: 2.081M +| sin_pos Trainable Parameters: 0.000M +| enc_pos_proj Trainable Parameters: 0.074M +| dec_query_proj Trainable Parameters: 0.074M +| dec_res_proj Trainable Parameters: 0.074M +| attn Trainable Parameters: 0.147M +| text_encoder_postnet Trainable Parameters: 2.771M +| fvae Trainable Parameters: 12.453M +| word_pos_proj Trainable Parameters: 0.037M +| fvae.g_pre_net Trainable Parameters: 0.295M +| fvae.encoder Trainable Parameters: 7.444M +| fvae.prior_flow Trainable Parameters: 0.917M +| fvae.decoder Trainable Parameters: 3.796M +| load 'model_gen' from 'checkpoints/hifi_lj/model_ckpt_steps_2076000.ckpt'. +| load 'model' from 'checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt'. 
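The `Trainable Parameters` summary above (23.020M overall, then one line per submodule) is printed by the repo's own logging helper; the figures themselves can be reproduced with a few lines of PyTorch. A minimal sketch under that assumption, with the helper names being mine:

```python
import torch.nn as nn

def trainable_millions(module: nn.Module) -> float:
    """Trainable parameter count in millions, matching the log's 'xx.xxxM' style."""
    return sum(p.numel() for p in module.parameters() if p.requires_grad) / 1e6

def report_parameters(model: nn.Module) -> None:
    print(f"| model Trainable Parameters: {trainable_millions(model):.3f}M")
    for name, child in model.named_children():
        print(f"| {name} Trainable Parameters: {trainable_millions(child):.3f}M")
```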
+| Validation results@160001: {'total_loss': 57.0476, 'kl_v': 0.0342, 'kl': 0.0342, 'l1': 0.2166, 'ssim': 0.2998, 'wdur': 0.1935, 'abs_word_dur_error': 53.0982, 'abs_sent_dur_error': 0.5271, 'fr': 0.8162, 'pcr': 0.8554, 'dfr': 0.9724, 'pe_perceptual_layer_0': 0.4594, 'pe_perceptual_layer_1': 1.1802, 'pe_perceptual_layer_2': 1.2947, 'pe_perceptual_layer_3': 0.2163, 'pe_perceptual_layer_4': 0.2885, 'pe_perceptual_layer_5': 1.2523, 'pe_perceptual_layer_6': 1.4063, 'pe_perceptual_layer_7': 1.3011, 'pe_perceptual_layer_8': 0.8934, 'pe_perceptual_layer_9': 0.1429, 'f0_dtw': 24.5873} +| Training end.. +Epoch 0 ended. Steps: 160002. {'total_loss': 0.4107, 'kl_v': 0.0038, 'kl': 0.0038, 'l1': 0.1018, 'ssim': 0.1246, 'wdur': 0.0897, 'abs_word_dur_error': 25.7859, 'abs_sent_dur_error': 0.1609, 'fr': 0.8135, 'pcr': 0.8455, 'dfr': 0.9731, 'a': 0.2994, 'batch_size': 46.0, 'r': 0.2429, 'f': 0.2436} diff --git a/text_to_speech/data_gen/tts/__pycache__/base_binarizer.cpython-38.pyc b/text_to_speech/data_gen/tts/__pycache__/base_binarizer.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3adb0c9d0cd05783d6218d2b8efe24d74409df4f Binary files /dev/null and b/text_to_speech/data_gen/tts/__pycache__/base_binarizer.cpython-38.pyc differ diff --git a/text_to_speech/data_gen/tts/__pycache__/base_preprocess.cpython-38.pyc b/text_to_speech/data_gen/tts/__pycache__/base_preprocess.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4f3cc7e7e073751595af652b67b2fb4b76b2f6bb Binary files /dev/null and b/text_to_speech/data_gen/tts/__pycache__/base_preprocess.cpython-38.pyc differ diff --git a/text_to_speech/data_gen/tts/base_binarizer.py b/text_to_speech/data_gen/tts/base_binarizer.py new file mode 100644 index 0000000000000000000000000000000000000000..3cbc6336c45fbcd3693b3216c6f0eb62cafe055d --- /dev/null +++ b/text_to_speech/data_gen/tts/base_binarizer.py @@ -0,0 +1,412 @@ +import json +import os +import random +from re import L +import traceback +from functools import partial + +import numpy as np +from resemblyzer import VoiceEncoder +from tqdm import tqdm + +from transformers import AutoTokenizer + +# import utils.commons.single_thread_env # NOQA +from text_to_speech.utils.audio import librosa_wav2spec +from text_to_speech.utils.audio.align import get_mel2ph, mel2token_to_dur +from text_to_speech.utils.audio.cwt import get_lf0_cwt, get_cont_lf0 +from text_to_speech.utils.audio.pitch.utils import f0_to_coarse +from text_to_speech.utils.audio.pitch_extractors import extract_pitch_simple +from text_to_speech.utils.commons.hparams import hparams +from text_to_speech.utils.commons.indexed_datasets import IndexedDatasetBuilder +from text_to_speech.utils.commons.multiprocess_utils import multiprocess_run_tqdm +from text_to_speech.utils.os_utils import remove_file, copy_file + +np.seterr(divide='ignore', invalid='ignore') + + +class BinarizationError(Exception): + pass + +sentence2graph_parser = None +bert_tokenizer = None +use_graph = False +use_bpe = True + + +class BaseBinarizer: + def __init__(self, processed_data_dir=None): + if processed_data_dir is None: + processed_data_dir = hparams['processed_data_dir'] + self.processed_data_dir = processed_data_dir + self.binarization_args = hparams['binarization_args'] + self.items = {} + self.item_names = [] + + global sentence2graph_parser + global use_graph + global use_bpe + global bert_tokenizer + if use_graph: + from text_to_speech.modules.tts.syntaspeech.syntactic_graph_buider import Sentence2GraphParser + 
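+        # Summary of the branches below: LibriTTS/LibriSpeech are binarized in a single
+        # process (multiprocessing clashes with the CUDA graph parser), while LJSpeech and
+        # the Chinese text processor keep multiprocessing and build the parser on CPU; in
+        # every branch the BERT tokenizer is loaded when use_bpe is enabled.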
+ if hparams['ds_name'] in ['libritts', 'librispeech']: + # Unfortunately, we found when processing libritts with multi-processing will incur pytorch.multiprocessing ERROR + # so we use single thread with cuda graph builder + # it take about 20 hours in a PC with 24-cores-cpu and a RTX2080Ti to process the whole LibriTTS + # so run the binarization and take a break! + if use_graph: + sentence2graph_parser = Sentence2GraphParser("en", use_gpu=True) + if use_bpe: + model_name = 'bert-base-uncased' + tokenizer_kwargs = {'cache_dir': None, 'use_fast': True, 'revision': 'main', 'use_auth_token': None} + bert_tokenizer = AutoTokenizer.from_pretrained(model_name, **tokenizer_kwargs) + elif hparams['ds_name'] == 'ljspeech': + # use multi-processing, thus gpu is disabled + # it takes about 30 minutes for binarization + if use_graph: + sentence2graph_parser = Sentence2GraphParser("en", use_gpu=False) + if use_bpe: + model_name = 'bert-base-uncased' + tokenizer_kwargs = {'cache_dir': None, 'use_fast': True, 'revision': 'main', 'use_auth_token': None} + bert_tokenizer = AutoTokenizer.from_pretrained(model_name, **tokenizer_kwargs) + elif hparams['preprocess_args']['txt_processor'] == 'zh': + # use multi-processing, thus gpu is disabled + # it takes about 30 minutes for binarization + if use_graph: + sentence2graph_parser = Sentence2GraphParser("zh", use_gpu=False) + if use_bpe: + model_name = 'bert-base-chinese' + tokenizer_kwargs = {'cache_dir': None, 'use_fast': True, 'revision': 'main', 'use_auth_token': None} + bert_tokenizer = AutoTokenizer.from_pretrained(model_name, **tokenizer_kwargs) + else: + pass + + def load_meta_data(self): + processed_data_dir = self.processed_data_dir + items_list = json.load(open(f"{processed_data_dir}/metadata.json")) + for r in tqdm(items_list, desc='Loading meta data.'): + item_name = r['item_name'] + self.items[item_name] = r + self.item_names.append(item_name) + if self.binarization_args['shuffle']: + random.seed(1234) + random.shuffle(self.item_names) + + @property + def train_item_names(self): + range_ = self._convert_range(self.binarization_args['train_range']) + return self.item_names[range_[0]:range_[1]] + + @property + def valid_item_names(self): + range_ = self._convert_range(self.binarization_args['valid_range']) + return self.item_names[range_[0]:range_[1]] + + @property + def test_item_names(self): + range_ = self._convert_range(self.binarization_args['test_range']) + return self.item_names[range_[0]:range_[1]] + + def _convert_range(self, range_): + if range_[1] == -1: + range_[1] = len(self.item_names) + return range_ + + def meta_data(self, prefix): + if prefix == 'valid': + item_names = self.valid_item_names + elif prefix == 'test': + item_names = self.test_item_names + else: + item_names = self.train_item_names + for item_name in item_names: + yield self.items[item_name] + + def process(self): + self.load_meta_data() + os.makedirs(hparams['binary_data_dir'], exist_ok=True) + for fn in ['phone_set.json', 'word_set.json', 'spk_map.json']: + remove_file(f"{hparams['binary_data_dir']}/{fn}") + copy_file(f"{hparams['processed_data_dir']}/{fn}", f"{hparams['binary_data_dir']}/{fn}") + if hparams['ds_name'] in ['ljspeech', 'biaobei', 'wenetspeech']: + self.process_data('valid') + self.process_data('test') + self.process_data('train') + elif hparams['ds_name'] in ['libritts', 'librispeech']: + self.process_data_single_processing('valid') + self.process_data_single_processing('test') + self.process_data_single_processing('train') + else: + 
self.process_data('valid') + self.process_data('test') + self.process_data('train') + # raise NotImplementedError + + def process_data(self, prefix): + data_dir = hparams['binary_data_dir'] + builder = IndexedDatasetBuilder(f'{data_dir}/{prefix}') + meta_data = list(self.meta_data(prefix)) + process_item = partial(self.process_item, binarization_args=self.binarization_args) + ph_lengths = [] + mel_lengths = [] + total_sec = 0 + items = [] + args = [{'item': item} for item in meta_data] + + for item_id, item in multiprocess_run_tqdm(process_item, args, desc='Processing data'): + if item is not None: + items.append(item) + if self.binarization_args['with_spk_embed']: + args = [{'wav': item['wav']} for item in items] + for item_id, spk_embed in multiprocess_run_tqdm( + self.get_spk_embed, args, + init_ctx_func=lambda wid: {'voice_encoder': VoiceEncoder().cuda()}, num_workers=4, + desc='Extracting spk embed'): + items[item_id]['spk_embed'] = spk_embed + + for item in items: + if not self.binarization_args['with_wav'] and 'wav' in item: + del item['wav'] + builder.add_item(item) + mel_lengths.append(item['len']) + assert item['len'] > 0, (item['item_name'], item['txt'], item['mel2ph']) + if 'ph_len' in item: + ph_lengths.append(item['ph_len']) + total_sec += item['sec'] + builder.finalize() + np.save(f'{data_dir}/{prefix}_lengths.npy', mel_lengths) + if len(ph_lengths) > 0: + np.save(f'{data_dir}/{prefix}_ph_lengths.npy', ph_lengths) + print(f"| {prefix} total duration: {total_sec:.3f}s") + + def process_data_single_processing(self, prefix): + data_dir = hparams['binary_data_dir'] + builder = IndexedDatasetBuilder(f'{data_dir}/{prefix}') + meta_data = list(self.meta_data(prefix)) + ph_lengths = [] + mel_lengths = [] + total_sec = 0 + + if self.binarization_args['with_spk_embed']: + voice_encoder = VoiceEncoder().cuda() + for raw_item in tqdm(meta_data): + item = self.process_item(raw_item, self.binarization_args) + if item is None: + continue + if item is not None: + if use_graph: + if item['dgl_graph'].num_nodes() != np.array(item['ph2word']).max(): + print(f"Skip Item: {item['item_name']} word nodes number incorrect!") + continue + + if self.binarization_args['with_spk_embed']: + spk_embed = self.get_spk_embed(item['wav'], {'voice_encoder': voice_encoder}) + item['spk_embed'] = spk_embed + + if not self.binarization_args['with_wav'] and 'wav' in item: + del item['wav'] + builder.add_item(item) + mel_lengths.append(item['len']) + assert item['len'] > 0, (item['item_name'], item['txt'], item['mel2ph']) + if 'ph_len' in item: + ph_lengths.append(item['ph_len']) + total_sec += item['sec'] + builder.finalize() + np.save(f'{data_dir}/{prefix}_lengths.npy', mel_lengths) + if len(ph_lengths) > 0: + np.save(f'{data_dir}/{prefix}_ph_lengths.npy', ph_lengths) + print(f"| {prefix} total duration: {total_sec:.3f}s") + + # def process_data_single_processing(self, prefix): + # data_dir = hparams['binary_data_dir'] + # builder = IndexedDatasetBuilder(f'{data_dir}/{prefix}') + # meta_data = list(self.meta_data(prefix)) + # ph_lengths = [] + # mel_lengths = [] + # total_sec = 0 + # items = [] + # args = [{'item': item} for item in meta_data] + + # for raw_item in tqdm(meta_data): + # item = self.process_item(raw_item, self.binarization_args) + # if item is not None: + # if item['dgl_graph'].num_nodes() != np.array(item['ph2word']).max(): + # print(f"Skip Item: {item['item_name']} word nodes number incorrect!") + # continue + + # items.append(item) + + # if self.binarization_args['with_spk_embed']: + # args = 
[{'wav': item['wav']} for item in items] + # for item_id, spk_embed in multiprocess_run_tqdm( + # self.get_spk_embed, args, + # init_ctx_func=lambda wid: {'voice_encoder': VoiceEncoder().cuda()}, num_workers=4, + # desc='Extracting spk embed'): + # items[item_id]['spk_embed'] = spk_embed + + # for item in items: + # if not self.binarization_args['with_wav'] and 'wav' in item: + # del item['wav'] + # builder.add_item(item) + # mel_lengths.append(item['len']) + # assert item['len'] > 0, (item['item_name'], item['txt'], item['mel2ph']) + # if 'ph_len' in item: + # ph_lengths.append(item['ph_len']) + # total_sec += item['sec'] + # builder.finalize() + # np.save(f'{data_dir}/{prefix}_lengths.npy', mel_lengths) + # if len(ph_lengths) > 0: + # np.save(f'{data_dir}/{prefix}_ph_lengths.npy', ph_lengths) + # print(f"| {prefix} total duration: {total_sec:.3f}s") + + @classmethod + def process_item(cls, item, binarization_args): + try: + item['ph_len'] = len(item['ph_token']) + item_name = item['item_name'] + wav_fn = item['wav_fn'] + wav, mel = cls.process_audio(wav_fn, item, binarization_args) + except Exception as e: + print(f"| Skip item ({e}) for index error. item_name: {item_name}, wav_fn: {wav_fn}") + return None + try: + n_bos_frames, n_eos_frames = 0, 0 + if binarization_args['with_align']: + tg_fn = f"{hparams['processed_data_dir']}/mfa_outputs/{item_name}.TextGrid" + item['tg_fn'] = tg_fn + cls.process_align(tg_fn, item) + if binarization_args['trim_eos_bos']: + n_bos_frames = item['dur'][0] + n_eos_frames = item['dur'][-1] + T = len(mel) + item['mel'] = mel[n_bos_frames:T - n_eos_frames] + + item['mel2ph'] = item['mel2ph'][n_bos_frames:T - n_eos_frames] + item['mel2word'] = item['mel2word'][n_bos_frames:T - n_eos_frames] + item['dur'] = item['dur'][1:-1] + item['dur_word'] = item['dur_word'][1:-1] + item['len'] = item['mel'].shape[0] + item['wav'] = wav[n_bos_frames * hparams['hop_size']:len(wav) - n_eos_frames * hparams['hop_size']] + if binarization_args['with_f0']: + cls.process_pitch(item, n_bos_frames, n_eos_frames) + except BinarizationError as e: + print(f"| Skip item ({e}). item_name: {item_name}, wav_fn: {wav_fn}") + return None + except Exception as e: + traceback.print_exc() + print(f"| Skip item. item_name: {item_name}, wav_fn: {wav_fn}") + return None + + # if item['mel'].shape[0] < 64: + # print(f"Skip Item: {item['item_name']} Mel-spectrogram is shorter than 64!") + # return None + # fix one bad case of stanza + if item['txt'].endswith('yn .'): + item['txt'] = item['txt'][:-4]+'y .' + if use_graph: + try: + language = sentence2graph_parser.language + if language == 'en': + dgl_graph, etypes = sentence2graph_parser.parse(item['txt']) + elif language == 'zh': + dgl_graph, etypes = sentence2graph_parser.parse(item['txt'], item['word'].split(" "), item['ph_gb_word'].split(" ")) + else: + raise NotImplementedError + item['dgl_graph'] = dgl_graph + item['edge_types'] = etypes + except: + print(f"| Dependency Parsing Error! Skip item. item_name: {item_name}, wav_fn: {wav_fn}") + return None + + if use_bpe: + sent = item['word'][6:-6] # discard the and , because the bert_tokenizer cannot recognize them. 
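+            # The lines below align WordPiece tokens back to words: ids 101/102 are
+            # BERT's [CLS]/[SEP], and any piece starting with '##' is a continuation,
+            # so it reuses the word index of the previous piece; as a result
+            # bert_token2word holds one word-level index per token.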
+ bert_tokens = bert_tokenizer.tokenize(sent) + input_ids = bert_tokenizer.convert_tokens_to_ids(bert_tokens) + input_ids.insert(0, 101) # add [CLS] to represent [BOS] + input_ids.append(102) # add [SEP] to represent [EOS] + + bert_tokens.insert(0, '') + bert_tokens.append('') + bert_token2word = [] + word_idx = 0 + for i in range(len(bert_tokens)): + if not bert_tokens[i].startswith("##"): # this token is a independent word + word_idx += 1 + bert_token2word.append(word_idx) + + item['bert_token'] = bert_tokens + item['bert_input_ids'] = input_ids + item['bert_token2word'] = bert_token2word + item['bert_attention_mask'] = [1 for _ in range(len(bert_tokens))] + item['bert_token_type_ids'] = [0 for _ in range(len(bert_tokens))] + + return item + + @classmethod + def process_audio(cls, wav_fn, res, binarization_args): + wav2spec_dict = librosa_wav2spec( + wav_fn, + fft_size=hparams['fft_size'], + hop_size=hparams['hop_size'], + win_length=hparams['win_size'], + num_mels=hparams['audio_num_mel_bins'], + fmin=hparams['fmin'], + fmax=hparams['fmax'], + sample_rate=hparams['audio_sample_rate'], + loud_norm=hparams['loud_norm']) + mel = wav2spec_dict['mel'] + wav = wav2spec_dict['wav'].astype(np.float16) + if binarization_args['with_linear']: + res['linear'] = wav2spec_dict['linear'] + res.update({'mel': mel, 'wav': wav, 'sec': len(wav) / hparams['audio_sample_rate'], 'len': mel.shape[0]}) + return wav, mel + + @staticmethod + def process_align(tg_fn, item): + ph = item['ph'] + mel = item['mel'] + ph_token = item['ph_token'] + if tg_fn is not None and os.path.exists(tg_fn): + mel2ph, dur = get_mel2ph(tg_fn, ph, mel, hparams['hop_size'], hparams['audio_sample_rate'], + hparams['binarization_args']['min_sil_duration']) + else: + raise BinarizationError(f"Align not found") + if np.array(mel2ph).max() - 1 >= len(ph_token): + raise BinarizationError( + f"Align does not match: mel2ph.max() - 1: {np.array(mel2ph).max() - 1}, len(phone_encoded): {len(ph_token)}") + item['mel2ph'] = mel2ph + item['dur'] = dur + + ph2word = item['ph2word'] + mel2word = [ph2word[p - 1] for p in item['mel2ph']] + item['mel2word'] = mel2word # [T_mel] + dur_word = mel2token_to_dur(mel2word, len(item['word_token'])) + item['dur_word'] = dur_word.tolist() # [T_word] + + @staticmethod + def process_pitch(item, n_bos_frames, n_eos_frames): + wav, mel = item['wav'], item['mel'] + f0 = extract_pitch_simple(item['wav']) + if sum(f0) == 0: + raise BinarizationError("Empty f0") + assert len(mel) == len(f0), (len(mel), len(f0)) + pitch_coarse = f0_to_coarse(f0) + item['f0'] = f0 + item['pitch'] = pitch_coarse + if hparams['binarization_args']['with_f0cwt']: + uv, cont_lf0_lpf = get_cont_lf0(f0) + logf0s_mean_org, logf0s_std_org = np.mean(cont_lf0_lpf), np.std(cont_lf0_lpf) + cont_lf0_lpf_norm = (cont_lf0_lpf - logf0s_mean_org) / logf0s_std_org + cwt_spec, scales = get_lf0_cwt(cont_lf0_lpf_norm) + item['cwt_spec'] = cwt_spec + item['cwt_mean'] = logf0s_mean_org + item['cwt_std'] = logf0s_std_org + + @staticmethod + def get_spk_embed(wav, ctx): + return ctx['voice_encoder'].embed_utterance(wav.astype(float)) + + @property + def num_workers(self): + return int(os.getenv('N_PROC', hparams.get('N_PROC', os.cpu_count()))) diff --git a/text_to_speech/data_gen/tts/base_preprocess.py b/text_to_speech/data_gen/tts/base_preprocess.py new file mode 100644 index 0000000000000000000000000000000000000000..db5e3ab88861c044e2c33247d818d5e418b6cddb --- /dev/null +++ b/text_to_speech/data_gen/tts/base_preprocess.py @@ -0,0 +1,252 @@ +import json +import 
os +import random +import re +import traceback +from collections import Counter +from functools import partial + +import librosa +from tqdm import tqdm +from text_to_speech.data_gen.tts.txt_processors.base_text_processor import get_txt_processor_cls +from text_to_speech.data_gen.tts.wav_processors.base_processor import get_wav_processor_cls +from text_to_speech.utils.commons.hparams import hparams +from text_to_speech.utils.commons.multiprocess_utils import multiprocess_run_tqdm +from text_to_speech.utils.os_utils import link_file, move_file, remove_file +from text_to_speech.utils.text.text_encoder import is_sil_phoneme, build_token_encoder + + +class BasePreprocessor: + def __init__(self): + self.preprocess_args = hparams['preprocess_args'] + txt_processor = self.preprocess_args['txt_processor'] + self.txt_processor = get_txt_processor_cls(txt_processor) + self.raw_data_dir = hparams['raw_data_dir'] + self.processed_dir = hparams['processed_data_dir'] + self.spk_map_fn = f"{self.processed_dir}/spk_map.json" + + def meta_data(self): + """ + + :return: {'item_name': Str, 'wav_fn': Str, 'txt': Str, 'spk_name': Str, 'txt_loader': None or Func} + """ + raise NotImplementedError + + def process(self): + processed_dir = self.processed_dir + wav_processed_tmp_dir = f'{processed_dir}/processed_tmp' + remove_file(wav_processed_tmp_dir) + os.makedirs(wav_processed_tmp_dir, exist_ok=True) + wav_processed_dir = f'{processed_dir}/{self.wav_processed_dirname}' + remove_file(wav_processed_dir) + os.makedirs(wav_processed_dir, exist_ok=True) + + meta_data = list(tqdm(self.meta_data(), desc='Load meta data')) + item_names = [d['item_name'] for d in meta_data] + assert len(item_names) == len(set(item_names)), 'Key `item_name` should be Unique.' + + # preprocess data + phone_list = [] + word_list = [] + spk_names = set() + process_item = partial(self.preprocess_first_pass, + txt_processor=self.txt_processor, + wav_processed_dir=wav_processed_dir, + wav_processed_tmp=wav_processed_tmp_dir, + preprocess_args=self.preprocess_args) + items = [] + args = [{ + 'item_name': item_raw['item_name'], + 'txt_raw': item_raw['txt'], + 'wav_fn': item_raw['wav_fn'], + 'txt_loader': item_raw.get('txt_loader'), + 'others': item_raw.get('others', None) + } for item_raw in meta_data] + for item_, (item_id, item) in zip(meta_data, multiprocess_run_tqdm(process_item, args, desc='Preprocess')): + if item is not None: + item_.update(item) + item = item_ + if 'txt_loader' in item: + del item['txt_loader'] + item['id'] = item_id + item['spk_name'] = item.get('spk_name', '') + item['others'] = item.get('others', None) + phone_list += item['ph'].split(" ") + word_list += item['word'].split(" ") + spk_names.add(item['spk_name']) + items.append(item) + + # add encoded tokens + ph_encoder, word_encoder = self._phone_encoder(phone_list), self._word_encoder(word_list) + spk_map = self.build_spk_map(spk_names) + args = [{ + 'ph': item['ph'], 'word': item['word'], 'spk_name': item['spk_name'], + 'word_encoder': word_encoder, 'ph_encoder': ph_encoder, 'spk_map': spk_map + } for item in items] + for idx, item_new_kv in multiprocess_run_tqdm(self.preprocess_second_pass, args, desc='Add encoded tokens'): + items[idx].update(item_new_kv) + + # build mfa data + if self.preprocess_args['use_mfa']: + mfa_dict = set() + mfa_input_dir = f'{processed_dir}/mfa_inputs' + remove_file(mfa_input_dir) + # group MFA inputs for better parallelism + mfa_groups = [i // self.preprocess_args['nsample_per_mfa_group'] for i in range(len(items))] + if 
self.preprocess_args['mfa_group_shuffle']: + random.seed(hparams['seed']) + random.shuffle(mfa_groups) + args = [{ + 'item': item, 'mfa_input_dir': mfa_input_dir, + 'mfa_group': mfa_group, 'wav_processed_tmp': wav_processed_tmp_dir, + 'preprocess_args': self.preprocess_args + } for item, mfa_group in zip(items, mfa_groups)] + for i, (ph_gb_word_nosil, new_wav_align_fn) in multiprocess_run_tqdm( + self.build_mfa_inputs, args, desc='Build MFA data'): + items[i]['wav_align_fn'] = new_wav_align_fn + for w in ph_gb_word_nosil.split(" "): + mfa_dict.add(f"{w} {w.replace('_', ' ')}") + mfa_dict = sorted(mfa_dict) + with open(f'{processed_dir}/mfa_dict.txt', 'w') as f: + f.writelines([f'{l}\n' for l in mfa_dict]) + with open(f"{processed_dir}/{self.meta_csv_filename}.json", 'w') as f: + f.write(re.sub(r'\n\s+([\d+\]])', r'\1', json.dumps(items, ensure_ascii=False, sort_keys=False, indent=1))) + remove_file(wav_processed_tmp_dir) + + @classmethod + def preprocess_first_pass(cls, item_name, txt_raw, txt_processor, + wav_fn, wav_processed_dir, wav_processed_tmp, + preprocess_args, txt_loader=None, others=None): + try: + if txt_loader is not None: + txt_raw = txt_loader(txt_raw) + ph, txt, word, ph2word, ph_gb_word = cls.txt_to_ph(txt_processor, txt_raw, preprocess_args) + + wav_fn, wav_align_fn = cls.process_wav( + item_name, wav_fn, + hparams['processed_data_dir'], + wav_processed_tmp, preprocess_args) + + # wav for binarization + ext = os.path.splitext(wav_fn)[1] + os.makedirs(wav_processed_dir, exist_ok=True) + new_wav_fn = f"{wav_processed_dir}/{item_name}{ext}" + move_link_func = move_file if os.path.dirname(wav_fn) == wav_processed_tmp else link_file + move_link_func(wav_fn, new_wav_fn) + return { + 'txt': txt, 'txt_raw': txt_raw, 'ph': ph, + 'word': word, 'ph2word': ph2word, 'ph_gb_word': ph_gb_word, + 'wav_fn': new_wav_fn, 'wav_align_fn': wav_align_fn, + 'others': others + } + except: + traceback.print_exc() + print(f"| Error is caught. 
item_name: {item_name}.") + return None + + @staticmethod + def txt_to_ph(txt_processor, txt_raw, preprocess_args): + txt_struct, txt = txt_processor.process(txt_raw, preprocess_args) + ph = [p for w in txt_struct for p in w[1]] + ph_gb_word = ["_".join(w[1]) for w in txt_struct] + words = [w[0] for w in txt_struct] + # word_id=0 is reserved for padding + ph2word = [w_id + 1 for w_id, w in enumerate(txt_struct) for _ in range(len(w[1]))] + return " ".join(ph), txt, " ".join(words), ph2word, " ".join(ph_gb_word) + + @staticmethod + def process_wav(item_name, wav_fn, processed_dir, wav_processed_tmp, preprocess_args): + processors = [get_wav_processor_cls(v) for v in preprocess_args['wav_processors']] + processors = [k() for k in processors if k is not None] + if len(processors) >= 1: + sr_file = librosa.core.get_samplerate(wav_fn) + output_fn_for_align = None + ext = os.path.splitext(wav_fn)[1] + input_fn = f"{wav_processed_tmp}/{item_name}{ext}" + link_file(wav_fn, input_fn) + for p in processors: + outputs = p.process(input_fn, sr_file, wav_processed_tmp, processed_dir, item_name, preprocess_args) + if len(outputs) == 3: + input_fn, sr, output_fn_for_align = outputs + else: + input_fn, sr = outputs + return input_fn, output_fn_for_align + else: + return wav_fn, wav_fn + + def _phone_encoder(self, ph_set): + ph_set_fn = f"{self.processed_dir}/phone_set.json" + if self.preprocess_args['reset_phone_dict'] or not os.path.exists(ph_set_fn): + ph_set = sorted(set(ph_set)) + json.dump(ph_set, open(ph_set_fn, 'w'), ensure_ascii=False) + print("| Build phone set: ", ph_set) + else: + ph_set = json.load(open(ph_set_fn, 'r')) + print("| Load phone set: ", ph_set) + return build_token_encoder(ph_set_fn) + + def _word_encoder(self, word_set): + word_set_fn = f"{self.processed_dir}/word_set.json" + if self.preprocess_args['reset_word_dict']: + word_set = Counter(word_set) + total_words = sum(word_set.values()) + word_set = word_set.most_common(hparams['word_dict_size']) + num_unk_words = total_words - sum([x[1] for x in word_set]) + word_set = ['', ''] + [x[0] for x in word_set] + word_set = sorted(set(word_set)) + json.dump(word_set, open(word_set_fn, 'w'), ensure_ascii=False) + print(f"| Build word set. Size: {len(word_set)}, #total words: {total_words}," + f" #unk_words: {num_unk_words}, word_set[:10]:, {word_set[:10]}.") + else: + word_set = json.load(open(word_set_fn, 'r')) + print("| Load word set. 
Size: ", len(word_set), word_set[:10]) + return build_token_encoder(word_set_fn) + + @classmethod + def preprocess_second_pass(cls, word, ph, spk_name, word_encoder, ph_encoder, spk_map): + word_token = word_encoder.encode(word) + ph_token = ph_encoder.encode(ph) + spk_id = spk_map[spk_name] + return {'word_token': word_token, 'ph_token': ph_token, 'spk_id': spk_id} + + def build_spk_map(self, spk_names): + spk_map = {x: i for i, x in enumerate(sorted(list(spk_names)))} + assert len(spk_map) == 0 or len(spk_map) <= hparams['num_spk'], len(spk_map) + print(f"| Number of spks: {len(spk_map)}, spk_map: {spk_map}") + json.dump(spk_map, open(self.spk_map_fn, 'w'), ensure_ascii=False) + return spk_map + + @classmethod + def build_mfa_inputs(cls, item, mfa_input_dir, mfa_group, wav_processed_tmp, preprocess_args): + item_name = item['item_name'] + wav_align_fn = item['wav_align_fn'] + ph_gb_word = item['ph_gb_word'] + ext = os.path.splitext(wav_align_fn)[1] + mfa_input_group_dir = f'{mfa_input_dir}/{mfa_group}' + os.makedirs(mfa_input_group_dir, exist_ok=True) + new_wav_align_fn = f"{mfa_input_group_dir}/{item_name}{ext}" + move_link_func = move_file if os.path.dirname(wav_align_fn) == wav_processed_tmp else link_file + move_link_func(wav_align_fn, new_wav_align_fn) + ph_gb_word_nosil = " ".join(["_".join([p for p in w.split("_") if not is_sil_phoneme(p)]) + for w in ph_gb_word.split(" ") if not is_sil_phoneme(w)]) + with open(f'{mfa_input_group_dir}/{item_name}.lab', 'w') as f_txt: + f_txt.write(ph_gb_word_nosil) + return ph_gb_word_nosil, new_wav_align_fn + + def load_spk_map(self, base_dir): + spk_map_fn = f"{base_dir}/spk_map.json" + spk_map = json.load(open(spk_map_fn, 'r')) + return spk_map + + def load_dict(self, base_dir): + ph_encoder = build_token_encoder(f'{base_dir}/phone_set.json') + word_encoder = build_token_encoder(f'{base_dir}/word_set.json') + return ph_encoder, word_encoder + + @property + def meta_csv_filename(self): + return 'metadata' + + @property + def wav_processed_dirname(self): + return 'wav_processed' diff --git a/text_to_speech/data_gen/tts/runs/align_and_binarize.py b/text_to_speech/data_gen/tts/runs/align_and_binarize.py new file mode 100644 index 0000000000000000000000000000000000000000..b385ae63f99d139bced86ec477ff97dfef11e5bb --- /dev/null +++ b/text_to_speech/data_gen/tts/runs/align_and_binarize.py @@ -0,0 +1,12 @@ +import utils.commons.single_thread_env # NOQA +from text_to_speech.utils.commons.hparams import set_hparams, hparams +from text_to_speech.data_gen.tts.runs.binarize import binarize +from text_to_speech.data_gen.tts.runs.preprocess import preprocess +from text_to_speech.data_gen.tts.runs.train_mfa_align import train_mfa_align + +if __name__ == '__main__': + set_hparams() + preprocess() + if hparams['preprocess_args']['use_mfa']: + train_mfa_align() + binarize() diff --git a/text_to_speech/data_gen/tts/runs/binarize.py b/text_to_speech/data_gen/tts/runs/binarize.py new file mode 100644 index 0000000000000000000000000000000000000000..81cbf21dd50ece1302a9c6a052fc0443b6b5c621 --- /dev/null +++ b/text_to_speech/data_gen/tts/runs/binarize.py @@ -0,0 +1,17 @@ +import utils.commons.single_thread_env # NOQA +from text_to_speech.utils.commons.hparams import hparams, set_hparams +import importlib + + +def binarize(): + binarizer_cls = hparams.get("binarizer_cls", 'data_gen.tts.base_binarizer.BaseBinarizer') + pkg = ".".join(binarizer_cls.split(".")[:-1]) + cls_name = binarizer_cls.split(".")[-1] + binarizer_cls = getattr(importlib.import_module(pkg), cls_name) + 
print("| Binarizer: ", binarizer_cls) + binarizer_cls().process() + + +if __name__ == '__main__': + set_hparams() + binarize() diff --git a/text_to_speech/data_gen/tts/runs/preprocess.py b/text_to_speech/data_gen/tts/runs/preprocess.py new file mode 100644 index 0000000000000000000000000000000000000000..951d65a98420e84a6dc06039071d07e6ad79cd2d --- /dev/null +++ b/text_to_speech/data_gen/tts/runs/preprocess.py @@ -0,0 +1,17 @@ +import utils.commons.single_thread_env # NOQA +from text_to_speech.utils.commons.hparams import hparams, set_hparams +import importlib + + +def preprocess(): + assert hparams['preprocess_cls'] != '' + + pkg = ".".join(hparams["preprocess_cls"].split(".")[:-1]) + cls_name = hparams["preprocess_cls"].split(".")[-1] + process_cls = getattr(importlib.import_module(pkg), cls_name) + process_cls().process() + + +if __name__ == '__main__': + set_hparams() + preprocess() diff --git a/text_to_speech/data_gen/tts/runs/train_mfa_align.py b/text_to_speech/data_gen/tts/runs/train_mfa_align.py new file mode 100644 index 0000000000000000000000000000000000000000..f0b845e7388b5656c4e0e2c5efddad7f1aa28e6a --- /dev/null +++ b/text_to_speech/data_gen/tts/runs/train_mfa_align.py @@ -0,0 +1,46 @@ +import utils.commons.single_thread_env # NOQA +import glob +import subprocess +from textgrid import TextGrid +import os +from text_to_speech.utils.commons.hparams import hparams, set_hparams + + +def train_mfa_align(mfa_outputs="mfa_outputs", + mfa_inputs="mfa_inputs", + model_name=None, pretrain_model_name=None, + mfa_cmd='train'): + CORPUS = hparams['processed_data_dir'].split("/")[-1] + NUM_JOB = int(os.getenv('N_PROC', os.cpu_count())) + env_vars = [f'CORPUS={CORPUS}', f'NUM_JOB={NUM_JOB}'] + if mfa_outputs is not None: + env_vars.append(f'MFA_OUTPUTS={mfa_outputs}') + if mfa_inputs is not None: + env_vars.append(f'MFA_INPUTS={mfa_inputs}') + if model_name is not None: + env_vars.append(f'MODEL_NAME={model_name}') + if pretrain_model_name is not None: + env_vars.append(f'PRETRAIN_MODEL_NAME={pretrain_model_name}') + if mfa_cmd is not None: + env_vars.append(f'MFA_CMD={mfa_cmd}') + env_str = ' '.join(env_vars) + print(f"| Run MFA for {CORPUS}. Env vars: {env_str}") + subprocess.check_call(f'{env_str} bash mfa_usr/run_mfa_train_align.sh', shell=True) + mfa_offset = hparams['preprocess_args']['mfa_offset'] + if mfa_offset > 0: + for tg_fn in glob.glob(f'{hparams["processed_data_dir"]}/{mfa_outputs}/*.TextGrid'): + tg = TextGrid.fromFile(tg_fn) + max_time = tg.maxTime + for tier in tg.tiers: + for interval in tier.intervals: + interval.maxTime = min(interval.maxTime + mfa_offset, max_time) + interval.minTime = min(interval.minTime + mfa_offset, max_time) + tier.intervals[0].minTime = 0 + tier.maxTime = min(tier.maxTime + mfa_offset, max_time) + tg.write(tg_fn) + TextGrid.fromFile(tg_fn) + + +if __name__ == '__main__': + set_hparams(print_hparams=False) + train_mfa_align() diff --git a/text_to_speech/data_gen/tts/txt_processors/__init__.py b/text_to_speech/data_gen/tts/txt_processors/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7815fc6d95bd38518a6213df09d2a020b77106f8 --- /dev/null +++ b/text_to_speech/data_gen/tts/txt_processors/__init__.py @@ -0,0 +1 @@ +from . 
import en, zh, zh_aishell_no_tone_sing \ No newline at end of file diff --git a/text_to_speech/data_gen/tts/txt_processors/__pycache__/__init__.cpython-38.pyc b/text_to_speech/data_gen/tts/txt_processors/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..792bbb4375295826e2de34c2cddc0063c66e006b Binary files /dev/null and b/text_to_speech/data_gen/tts/txt_processors/__pycache__/__init__.cpython-38.pyc differ diff --git a/text_to_speech/data_gen/tts/txt_processors/__pycache__/base_text_processor.cpython-38.pyc b/text_to_speech/data_gen/tts/txt_processors/__pycache__/base_text_processor.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1537a77cfdb1613322ae7c2237ff1fc16fd0d1df Binary files /dev/null and b/text_to_speech/data_gen/tts/txt_processors/__pycache__/base_text_processor.cpython-38.pyc differ diff --git a/text_to_speech/data_gen/tts/txt_processors/__pycache__/en.cpython-38.pyc b/text_to_speech/data_gen/tts/txt_processors/__pycache__/en.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..354650fb418bd842f35d8a3bf8418f759b8c8768 Binary files /dev/null and b/text_to_speech/data_gen/tts/txt_processors/__pycache__/en.cpython-38.pyc differ diff --git a/text_to_speech/data_gen/tts/txt_processors/__pycache__/zh.cpython-38.pyc b/text_to_speech/data_gen/tts/txt_processors/__pycache__/zh.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..995715c838e60ce5da1339e911ec7c2263d25f67 Binary files /dev/null and b/text_to_speech/data_gen/tts/txt_processors/__pycache__/zh.cpython-38.pyc differ diff --git a/text_to_speech/data_gen/tts/txt_processors/__pycache__/zh_aishell_no_tone_sing.cpython-38.pyc b/text_to_speech/data_gen/tts/txt_processors/__pycache__/zh_aishell_no_tone_sing.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4ec2154a83c212bec943458cd0f6746b4bc4a2ee Binary files /dev/null and b/text_to_speech/data_gen/tts/txt_processors/__pycache__/zh_aishell_no_tone_sing.cpython-38.pyc differ diff --git a/text_to_speech/data_gen/tts/txt_processors/base_text_processor.py b/text_to_speech/data_gen/tts/txt_processors/base_text_processor.py new file mode 100644 index 0000000000000000000000000000000000000000..e88a9a55e8148d09988134246e8563602e6d6d42 --- /dev/null +++ b/text_to_speech/data_gen/tts/txt_processors/base_text_processor.py @@ -0,0 +1,50 @@ +from text_to_speech.utils.text.text_encoder import is_sil_phoneme + +REGISTERED_TEXT_PROCESSORS = {} + + +def register_txt_processors(name): + def _f(cls): + REGISTERED_TEXT_PROCESSORS[name] = cls + return cls + + return _f + + +def get_txt_processor_cls(name): + return REGISTERED_TEXT_PROCESSORS.get(name, None) + + +class BaseTxtProcessor: + @staticmethod + def sp_phonemes(): + return ['|'] + + @classmethod + def process(cls, txt, preprocess_args): + raise NotImplementedError + + @classmethod + def postprocess(cls, txt_struct, preprocess_args): + # remove sil phoneme in head and tail + while len(txt_struct) > 0 and is_sil_phoneme(txt_struct[0][0]): + txt_struct = txt_struct[1:] + while len(txt_struct) > 0 and is_sil_phoneme(txt_struct[-1][0]): + txt_struct = txt_struct[:-1] + if preprocess_args['with_phsep']: + txt_struct = cls.add_bdr(txt_struct) + if preprocess_args['add_eos_bos']: + txt_struct = [["", [""]]] + txt_struct + [["", [""]]] + return txt_struct + + @classmethod + def add_bdr(cls, txt_struct): + txt_struct_ = [] + for i, ts in enumerate(txt_struct): + 
txt_struct_.append(ts) + if i != len(txt_struct) - 1 and \ + not is_sil_phoneme(txt_struct[i][0]) and not is_sil_phoneme(txt_struct[i + 1][0]): + # txt_struct_.append(['|', ['|']]) + # We disable the sep token because it is incompatible with the syntactic graph. + pass + return txt_struct_ diff --git a/text_to_speech/data_gen/tts/txt_processors/en.py b/text_to_speech/data_gen/tts/txt_processors/en.py new file mode 100644 index 0000000000000000000000000000000000000000..6455d20404702fc8ced8571c2f187a744f7a20a9 --- /dev/null +++ b/text_to_speech/data_gen/tts/txt_processors/en.py @@ -0,0 +1,77 @@ +import re +import unicodedata + +from g2p_en import G2p +from g2p_en.expand import normalize_numbers +from nltk import pos_tag +from nltk.tokenize import TweetTokenizer + +from text_to_speech.data_gen.tts.txt_processors.base_text_processor import BaseTxtProcessor, register_txt_processors +from text_to_speech.utils.text.text_encoder import PUNCS, is_sil_phoneme + + +class EnG2p(G2p): + word_tokenize = TweetTokenizer().tokenize + + def __call__(self, text): + # preprocessing + words = EnG2p.word_tokenize(text) + tokens = pos_tag(words) # tuples of (word, tag) + + # steps + prons = [] + for word, pos in tokens: + if re.search("[a-z]", word) is None: + pron = [word] + + elif word in self.homograph2features: # Check homograph + pron1, pron2, pos1 = self.homograph2features[word] + if pos.startswith(pos1): + pron = pron1 + else: + pron = pron2 + elif word in self.cmu: # lookup CMU dict + pron = self.cmu[word][0] + else: # predict for oov + pron = self.predict(word) + + prons.extend(pron) + prons.extend([" "]) + + return prons[:-1] + + +@register_txt_processors('en') +class TxtProcessor(BaseTxtProcessor): + g2p = EnG2p() + + @staticmethod + def preprocess_text(text): + text = normalize_numbers(text) + text = ''.join(char for char in unicodedata.normalize('NFD', text) + if unicodedata.category(char) != 'Mn') # Strip accents + text = text.lower() + text = re.sub("[\'\"()]+", "", text) + text = re.sub("[-]+", " ", text) + text = re.sub(f"[^ a-z{PUNCS}]", "", text) + text = re.sub(f" ?([{PUNCS}]) ?", r"\1", text) # !! -> ! + text = re.sub(f"([{PUNCS}])+", r"\1", text) # !! -> !
+ text = text.replace("i.e.", "that is") + text = text.replace("i.e.", "that is") + text = text.replace("etc.", "etc") + text = re.sub(f"([{PUNCS}])", r" \1 ", text) + text = re.sub(rf"\s+", r" ", text) + return text + + @classmethod + def process(cls, txt, preprocess_args): + txt = cls.preprocess_text(txt).strip() + phs = cls.g2p(txt) + txt_struct = [[w, []] for w in txt.split(" ")] + i_word = 0 + for p in phs: + if p == ' ': + i_word += 1 + else: + txt_struct[i_word][1].append(p) + txt_struct = cls.postprocess(txt_struct, preprocess_args) + return txt_struct, txt diff --git a/text_to_speech/data_gen/tts/txt_processors/zh.py b/text_to_speech/data_gen/tts/txt_processors/zh.py new file mode 100644 index 0000000000000000000000000000000000000000..4b26f6ba130822c2de9d0a0a91e0cb1a9e6d79f9 --- /dev/null +++ b/text_to_speech/data_gen/tts/txt_processors/zh.py @@ -0,0 +1,117 @@ +import re +import jieba +from pypinyin import pinyin, Style +from text_to_speech.utils.text.text_norm import NSWNormalizer +from text_to_speech.data_gen.tts.txt_processors.base_text_processor import BaseTxtProcessor, register_txt_processors +from text_to_speech.utils.text.text_encoder import PUNCS, is_sil_phoneme + +ALL_SHENMU = ['zh', 'ch', 'sh', 'b', 'p', 'm', 'f', 'd', 't', 'n', 'l', 'g', 'k', 'h', 'j', + 'q', 'x', 'r', 'z', 'c', 's', 'y', 'w'] + + +@register_txt_processors('zh') +class TxtProcessor(BaseTxtProcessor): + table = {ord(f): ord(t) for f, t in zip( + u':,。!?【】()%#@&1234567890', + u':,.!?[]()%#@&1234567890')} + + @staticmethod + def sp_phonemes(): + return ['|', '#'] + + @staticmethod + def preprocess_text(text): + text = text.translate(TxtProcessor.table) + text = NSWNormalizer(text).normalize(remove_punc=False).lower() + text = re.sub("[\'\"()]+", "", text) + text = re.sub("[-]+", " ", text) + text = re.sub(f"[^ A-Za-z\u4e00-\u9fff{PUNCS}]", "", text) + text = re.sub(f"([{PUNCS}])+", r"\1", text) # !! -> ! + text = re.sub(f"([{PUNCS}])", r" \1 ", text) + text = re.sub(rf"\s+", r"", text) + text = re.sub(rf"[A-Za-z]+", r"$", text) + return text + + @classmethod + def pinyin_with_en(cls, txt, style): + x = pinyin(txt, style) + x = [t[0] for t in x] + x_ = [] + for t in x: + if '$' not in t: + x_.append(t) + else: + x_ += list(t) + x_ = [t if t != '$' else 'ENG' for t in x_] + return x_ + + @classmethod + def process(cls, txt, pre_align_args): + txt = cls.preprocess_text(txt) + txt = txt.replace("嗯", "蒽") # pypin会把嗯的声母韵母识别为'',导致ph2word出现错位。 + # https://blog.csdn.net/zhoulei124/article/details/89055403 + + shengmu = cls.pinyin_with_en(txt, style=Style.INITIALS) + yunmu = cls.pinyin_with_en(txt, style= + Style.FINALS_TONE3 if pre_align_args['use_tone'] else Style.FINALS) + assert len(shengmu) == len(yunmu) + for i in range(len(shengmu)): + if shengmu[i] == '' and yunmu[i] == '': + print(f"发现了一个声母韵母都是空的文字:{txt[i]}") + ph_list = [] + for a, b in zip(shengmu, yunmu): + if a == b: + ph_list += [a] + else: + ph_list += [a + "%" + b] + seg_list = '#'.join(jieba.cut(txt)) + assert len(ph_list) == len([s for s in seg_list if s != '#']), (ph_list, seg_list) + + # 加入词边界'#' + ph_list_ = [] + seg_idx = 0 + for p in ph_list: + if seg_list[seg_idx] == '#': + ph_list_.append('#') + seg_idx += 1 + elif len(ph_list_) > 0: + ph_list_.append("|") + seg_idx += 1 + finished = False + if not finished: + ph_list_ += [x for x in p.split("%") if x != ''] + + ph_list = ph_list_ + + # 去除静音符号周围的词边界标记 [..., '#', ',', '#', ...] 
+ sil_phonemes = list(PUNCS) + TxtProcessor.sp_phonemes() + ph_list_ = [] + for i in range(0, len(ph_list), 1): + if ph_list[i] != '#' or (ph_list[i - 1] not in sil_phonemes and ph_list[i + 1] not in sil_phonemes): + ph_list_.append(ph_list[i]) + ph_list = ph_list_ + + txt_struct = [[w, []] for w in txt] + i = 0 + for ph in ph_list: + if ph == '|' or ph == '#': + i += 1 + continue + # elif ph in [',', '.']: + elif ph in [',', '.', '?', '!', ':']: + i += 1 + txt_struct[i][1].append(ph) + i += 1 + continue + txt_struct[i][1].append(ph) + # return ph_list, txt + txt_struct.insert(0, ['', ['']]) + txt_struct.append(['', ['']]) + return txt_struct, txt + + +if __name__ == '__main__': + # t = 'simon演唱过后,simon还进行了simon精彩的文艺演出simon.' + t = '你当我傻啊?脑子那么大怎么塞进去???' + phs, txt = TxtProcessor.process(t, {'use_tone': True}) + print(phs, txt) diff --git a/text_to_speech/data_gen/tts/txt_processors/zh_aishell_no_tone_sing.py b/text_to_speech/data_gen/tts/txt_processors/zh_aishell_no_tone_sing.py new file mode 100644 index 0000000000000000000000000000000000000000..fcb28831d80d7cc11b17baaf3814b0a1c2f827b8 --- /dev/null +++ b/text_to_speech/data_gen/tts/txt_processors/zh_aishell_no_tone_sing.py @@ -0,0 +1,126 @@ +import re +import jieba +from pypinyin import pinyin, Style +from text_to_speech.utils.text.text_norm import NSWNormalizer +from text_to_speech.data_gen.tts.txt_processors.base_text_processor import BaseTxtProcessor, register_txt_processors +from text_to_speech.utils.text.text_encoder import PUNCS, is_sil_phoneme + +ALL_SHENMU = ['zh', 'ch', 'sh', 'b', 'p', 'm', 'f', 'd', 't', 'n', 'l', 'g', 'k', 'h', 'j', + 'q', 'x', 'r', 'z', 'c', 's', 'y', 'w'] + + +@register_txt_processors('zh') +class TxtProcessor(BaseTxtProcessor): + table = {ord(f): ord(t) for f, t in zip( + u':,。!?【】()%#@&1234567890', + u':,.!?[]()%#@&1234567890')} + + @staticmethod + def sp_phonemes(): + return ['|', '#'] + + @staticmethod + def preprocess_text(text): + text = text.translate(TxtProcessor.table) + text = NSWNormalizer(text).normalize(remove_punc=False).lower() + text = re.sub("[\'\"()]+", "", text) + text = re.sub("[-]+", " ", text) + text = re.sub(f"[^ A-Za-z\u4e00-\u9fff{PUNCS}]", "", text) + text = re.sub(f"([{PUNCS}])+", r"\1", text) # !! -> ! 
+ text = re.sub(f"([{PUNCS}])", r" \1 ", text) + text = re.sub(rf"\s+", r"", text) + text = re.sub(rf"[A-Za-z]+", r"$", text) + return text + + @classmethod + def pinyin_with_en(cls, txt, style): + x = pinyin(txt, style) + x = [t[0] for t in x] + x_ = [] + for t in x: + if '$' not in t: + x_.append(t) + else: + x_ += list(t) + x_ = [t if t != '$' else 'ENG' for t in x_] + return x_ + + @classmethod + def process(cls, txt, pre_align_args): + txt = cls.preprocess_text(txt) + txt = txt.replace("嗯", "蒽") # pypin会把嗯的声母韵母识别为'',导致ph2word出现错位。 + # https://blog.csdn.net/zhoulei124/article/details/89055403 + + pre_align_args['use_tone'] = False + + shengmu = cls.pinyin_with_en(txt, style=Style.INITIALS) + yunmu = cls.pinyin_with_en(txt, style= + Style.FINALS_TONE3 if pre_align_args['use_tone'] else Style.FINALS) + assert len(shengmu) == len(yunmu) + for i in range(len(shengmu)): + if shengmu[i] == '' and yunmu[i] == '': + print(f"发现了一个声母韵母都是空的文字:{txt[i]}") + ph_list = [] + for a, b in zip(shengmu, yunmu): + + if b == 'ueng': # 发现sing数据集里没有后鼻音 + b = 'uen' + + if a == b: + ph_list += [a] + else: + ph_list += [a + "%" + b] + seg_list = '#'.join(jieba.cut(txt)) + assert len(ph_list) == len([s for s in seg_list if s != '#']), (ph_list, seg_list) + + # 加入词边界'#' + ph_list_ = [] + seg_idx = 0 + for p in ph_list: + if seg_list[seg_idx] == '#': + ph_list_.append('#') + seg_idx += 1 + elif len(ph_list_) > 0: + ph_list_.append("|") + seg_idx += 1 + finished = False + if not finished: + ph_list_ += [x for x in p.split("%") if x != ''] + + ph_list = ph_list_ + + # 去除静音符号周围的词边界标记 [..., '#', ',', '#', ...] + sil_phonemes = list(PUNCS) + TxtProcessor.sp_phonemes() + ph_list_ = [] + for i in range(0, len(ph_list), 1): + if ph_list[i] != '#' or (ph_list[i - 1] not in sil_phonemes and ph_list[i + 1] not in sil_phonemes): + ph_list_.append(ph_list[i]) + ph_list = ph_list_ + + txt_struct = [[w, []] for w in txt] + i = 0 + for ph in ph_list: + if ph == '|' or ph == '#': + i += 1 + continue + # elif ph in [',', '.']: + elif ph in [',', '.', '?', '!', ':']: + i += 1 + txt_struct[i][1].append(ph) + i += 1 + continue + txt_struct[i][1].append(ph) + # return ph_list, txt + txt_struct.insert(0, ['_NONE', ['_NONE']]) + txt_struct.append(['breathe', ['breathe']]) + + # txt_struct.insert(0, ['', ['']]) + # txt_struct.append(['', ['']]) + return txt_struct, txt + + +if __name__ == '__main__': + # t = 'simon演唱过后,simon还进行了simon精彩的文艺演出simon.' + t = '你当我傻啊?脑子那么大怎么塞进去???' + phs, txt = TxtProcessor.process(t, {'use_tone': True}) + print(phs, txt) diff --git a/text_to_speech/data_gen/tts/wav_processors/__init__.py b/text_to_speech/data_gen/tts/wav_processors/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4be97b377dcb95a0e6bceb876ac0ce93c8290249 --- /dev/null +++ b/text_to_speech/data_gen/tts/wav_processors/__init__.py @@ -0,0 +1,2 @@ +from . import base_processor +from . 
import common_processors diff --git a/text_to_speech/data_gen/tts/wav_processors/__pycache__/__init__.cpython-38.pyc b/text_to_speech/data_gen/tts/wav_processors/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8db5a25d8e0a3e6dc30db790a2220c6528b7ff1b Binary files /dev/null and b/text_to_speech/data_gen/tts/wav_processors/__pycache__/__init__.cpython-38.pyc differ diff --git a/text_to_speech/data_gen/tts/wav_processors/__pycache__/base_processor.cpython-38.pyc b/text_to_speech/data_gen/tts/wav_processors/__pycache__/base_processor.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7d66fe44d42b54dc26d7cf8c93ad55d870cf2d9a Binary files /dev/null and b/text_to_speech/data_gen/tts/wav_processors/__pycache__/base_processor.cpython-38.pyc differ diff --git a/text_to_speech/data_gen/tts/wav_processors/__pycache__/common_processors.cpython-38.pyc b/text_to_speech/data_gen/tts/wav_processors/__pycache__/common_processors.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0cf71c732ce33481ea29e2a5863ac5bcef7a257c Binary files /dev/null and b/text_to_speech/data_gen/tts/wav_processors/__pycache__/common_processors.cpython-38.pyc differ diff --git a/text_to_speech/data_gen/tts/wav_processors/base_processor.py b/text_to_speech/data_gen/tts/wav_processors/base_processor.py new file mode 100644 index 0000000000000000000000000000000000000000..e8200dc58a9388ac94a5ec34b8a65f75e380255b --- /dev/null +++ b/text_to_speech/data_gen/tts/wav_processors/base_processor.py @@ -0,0 +1,25 @@ +REGISTERED_WAV_PROCESSORS = {} + + +def register_wav_processors(name): + def _f(cls): + REGISTERED_WAV_PROCESSORS[name] = cls + return cls + + return _f + + +def get_wav_processor_cls(name): + return REGISTERED_WAV_PROCESSORS.get(name, None) + + +class BaseWavProcessor: + @property + def name(self): + raise NotImplementedError + + def output_fn(self, input_fn): + return f'{input_fn[:-4]}_{self.name}.wav' + + def process(self, input_fn, sr, tmp_dir, processed_dir, item_name, preprocess_args): + raise NotImplementedError diff --git a/text_to_speech/data_gen/tts/wav_processors/common_processors.py b/text_to_speech/data_gen/tts/wav_processors/common_processors.py new file mode 100644 index 0000000000000000000000000000000000000000..5cf79cfd118bc8ab13355ff57435a244688e4b22 --- /dev/null +++ b/text_to_speech/data_gen/tts/wav_processors/common_processors.py @@ -0,0 +1,86 @@ +import os +import subprocess +import librosa +import numpy as np +from text_to_speech.data_gen.tts.wav_processors.base_processor import BaseWavProcessor, register_wav_processors +from text_to_speech.utils.audio import trim_long_silences +from text_to_speech.utils.audio.io import save_wav +from text_to_speech.utils.audio.rnnoise import rnnoise +from text_to_speech.utils.commons.hparams import hparams + + +@register_wav_processors(name='sox_to_wav') +class ConvertToWavProcessor(BaseWavProcessor): + @property + def name(self): + return 'ToWav' + + def process(self, input_fn, sr, tmp_dir, processed_dir, item_name, preprocess_args): + if input_fn[-4:] == '.wav': + return input_fn, sr + else: + output_fn = self.output_fn(input_fn) + subprocess.check_call(f'sox -v 0.95 "{input_fn}" -t wav "{output_fn}"', shell=True) + return output_fn, sr + + +@register_wav_processors(name='sox_resample') +class ResampleProcessor(BaseWavProcessor): + @property + def name(self): + return 'Resample' + + def process(self, input_fn, sr, tmp_dir, processed_dir, item_name, 
preprocess_args): + output_fn = self.output_fn(input_fn) + sr_file = librosa.core.get_samplerate(input_fn) + if sr != sr_file: + subprocess.check_call(f'sox -v 0.95 "{input_fn}" -r{sr} "{output_fn}"', shell=True) + y, _ = librosa.core.load(input_fn, sr=sr) + y, _ = librosa.effects.trim(y) + save_wav(y, output_fn, sr) + return output_fn, sr + else: + return input_fn, sr + + +@register_wav_processors(name='trim_sil') +class TrimSILProcessor(BaseWavProcessor): + @property + def name(self): + return 'TrimSIL' + + def process(self, input_fn, sr, tmp_dir, processed_dir, item_name, preprocess_args): + output_fn = self.output_fn(input_fn) + y, _ = librosa.core.load(input_fn, sr=sr) + y, _ = librosa.effects.trim(y) + save_wav(y, output_fn, sr) + return output_fn, sr + + +@register_wav_processors(name='trim_all_sil') +class TrimAllSILProcessor(BaseWavProcessor): + @property + def name(self): + return 'TrimAllSIL' + + def process(self, input_fn, sr, tmp_dir, processed_dir, item_name, preprocess_args): + output_fn = self.output_fn(input_fn) + y, audio_mask, _ = trim_long_silences( + input_fn, vad_max_silence_length=preprocess_args.get('vad_max_silence_length', 12)) + save_wav(y, output_fn, sr) + if preprocess_args['save_sil_mask']: + os.makedirs(f'{processed_dir}/sil_mask', exist_ok=True) + np.save(f'{processed_dir}/sil_mask/{item_name}.npy', audio_mask) + return output_fn, sr + + +@register_wav_processors(name='denoise') +class DenoiseProcessor(BaseWavProcessor): + @property + def name(self): + return 'Denoise' + + def process(self, input_fn, sr, tmp_dir, processed_dir, item_name, preprocess_args): + output_fn = self.output_fn(input_fn) + rnnoise(input_fn, output_fn, out_sample_rate=sr) + return output_fn, sr diff --git a/text_to_speech/egs/datasets/audio/aishell3_no_tone/base_text2mel.yaml b/text_to_speech/egs/datasets/audio/aishell3_no_tone/base_text2mel.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2a0045b85ca676622aaf2a4913bcfc3aede8db76 --- /dev/null +++ b/text_to_speech/egs/datasets/audio/aishell3_no_tone/base_text2mel.yaml @@ -0,0 +1,30 @@ +base_config: + - egs/egs_bases/tts/base_zh.yaml +raw_data_dir: 'data/raw/AISHELL-3' +processed_data_dir: 'data/processed/aishell3_no_tone' +binary_data_dir: 'data/binary/aishell3_no_tone' +audio_num_mel_bins: 80 +ds_name: aishell + +audio_sample_rate: 24000 +hop_size: 128 # Hop size. +fft_size: 512 # FFT size. +win_size: 512 # Window size.
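As a quick sanity check on the STFT settings in the config above: with 24 kHz audio and a hop of 128 samples, each mel frame covers about 5.3 ms, one second of audio yields 187.5 frames, and the 512-sample window spans roughly 21 ms. A minimal sketch of that arithmetic (values taken from this config, nothing else assumed):

```python
# Frame geometry implied by the config above (audio_sample_rate=24000, hop_size=128, win_size=512).
audio_sample_rate = 24000
hop_size = 128
win_size = 512

frame_shift_ms = 1000 * hop_size / audio_sample_rate   # ~5.33 ms between consecutive mel frames
frames_per_sec = audio_sample_rate / hop_size          # 187.5 mel frames per second of audio
window_ms = 1000 * win_size / audio_sample_rate        # ~21.3 ms analysis window
print(frame_shift_ms, frames_per_sec, window_ms)
```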
+fmin: 80 +fmax: 12000 +min_level_db: -120 + +preprocess_cls: egs.datasets.audio.aishell3_no_tone.preprocess.AiShell3Preprocess + +pre_align_args: + use_tone: false # for ZH + txt_processor: zh_aishell_no_tone_sing +binarization_args: + trim_eos_bos: true + shuffle: true + +use_spk_id: true +num_spk: 220 +test_num: 200 +vocoder: vocoders.hifigan.HifiGAN +vocoder_ckpt: 'checkpoints/0707_hifigan_as3_ori_1' \ No newline at end of file diff --git a/text_to_speech/egs/datasets/audio/aishell3_no_tone/fs2.yaml b/text_to_speech/egs/datasets/audio/aishell3_no_tone/fs2.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1524d5000e8ca591efd61f8bc52360005bfdd204 --- /dev/null +++ b/text_to_speech/egs/datasets/audio/aishell3_no_tone/fs2.yaml @@ -0,0 +1,6 @@ +base_config: + - egs/egs_bases/tts/fs_adv.yaml + - ./base_text2mel.yaml + +task_cls: tasks.tts.fs_adv_aishell.FastSpeechAdvTask +enc_ffn_kernel_size: 5 \ No newline at end of file diff --git a/text_to_speech/egs/datasets/audio/aishell3_no_tone/fs2_kernel9.yaml b/text_to_speech/egs/datasets/audio/aishell3_no_tone/fs2_kernel9.yaml new file mode 100644 index 0000000000000000000000000000000000000000..227f040a333825d0e3103987cfc8fe07bf08898a --- /dev/null +++ b/text_to_speech/egs/datasets/audio/aishell3_no_tone/fs2_kernel9.yaml @@ -0,0 +1,6 @@ +base_config: + - egs/egs_bases/tts/fs_adv.yaml + - ./base_text2mel.yaml + +task_cls: tasks.tts.fs_adv_aishell.FastSpeechAdvTask +enc_ffn_kernel_size: 9 \ No newline at end of file diff --git a/text_to_speech/egs/datasets/audio/aishell3_no_tone/preprocess.py b/text_to_speech/egs/datasets/audio/aishell3_no_tone/preprocess.py new file mode 100644 index 0000000000000000000000000000000000000000..462cd7159b1ce5b3a9d7e6cf53403a37e5abec19 --- /dev/null +++ b/text_to_speech/egs/datasets/audio/aishell3_no_tone/preprocess.py @@ -0,0 +1,31 @@ +import glob +from data_gen.tts.base_preprocess import BasePreprocessor + + +class AiShell3Preprocess(BasePreprocessor): + def meta_data(self): + wavfn2text = {} + + def get_wavfn2text(dir_name): + d = open(f'{self.raw_data_dir}/{dir_name}/content.txt').readlines() + d = [l.strip().split("\t") for l in d if l.strip() != ''] + d = {l[0]: "".join(l[1].split(" ")[::2]) for l in d} + wavfn2text.update(d) + + get_wavfn2text('train') + get_wavfn2text('test') + + all_wavs = sorted( + glob.glob(f'{self.raw_data_dir}/train/wav/*/*.wav') + + glob.glob(f'{self.raw_data_dir}/test/wav/*/*.wav')) + for wav_fn in all_wavs: + wav_basename = wav_fn.split("/")[-1] + spk_name = wav_fn.split("/")[-2] + item_name = f'{spk_name}_{wav_basename}' + # yield {'item_name': item_name, 'wav_fn': wav_fn, 'txt': l} + # yield item_name, wav_fn, wavfn2text[wav_basename], spk_name + yield {'item_name': item_name, 'wav_fn': wav_fn, 'txt': wavfn2text[wav_basename], 'spk_name': spk_name} + + +if __name__ == "__main__": + AiShell3Preprocess().process() diff --git a/text_to_speech/egs/datasets/audio/biaobei/base_text2mel.yaml b/text_to_speech/egs/datasets/audio/biaobei/base_text2mel.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f6771a2db7549a701cc061a622590839622e55ea --- /dev/null +++ b/text_to_speech/egs/datasets/audio/biaobei/base_text2mel.yaml @@ -0,0 +1,19 @@ +base_config: egs/egs_bases/tts/base_zh.yaml +raw_data_dir: 'data/raw/biaobei' +processed_data_dir: 'data/processed/biaobei' +binary_data_dir: 'data/binary/biaobei' +preprocess_cls: egs.datasets.audio.biaobei.preprocess.BiaobeiPreprocess + +ds_name: biaobei +binarization_args: + train_range: [ 871, 
-1 ] + test_range: [ 0, 523 ] + valid_range: [ 523, 871 ] +test_ids: [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 68, 70, 74, 87, 110, 172, 190, 215, 231, 294, + 316, 324, 402, 422, 485, 500, 505, 508, 509, 519 ] +f0_min: 80 +f0_max: 600 +vocoder_ckpt: checkpoints/hifi_biaobei +num_valid_plots: 30 \ No newline at end of file diff --git a/text_to_speech/egs/datasets/audio/biaobei/preprocess.py b/text_to_speech/egs/datasets/audio/biaobei/preprocess.py new file mode 100644 index 0000000000000000000000000000000000000000..44f48e22675a02e3a4b91a69caf7344f5a2982ef --- /dev/null +++ b/text_to_speech/egs/datasets/audio/biaobei/preprocess.py @@ -0,0 +1,16 @@ +from data_gen.tts.base_preprocess import BasePreprocessor +import re + + +class BiaobeiPreprocess(BasePreprocessor): + def meta_data(self): + input_dir = self.raw_data_dir + with open(f"{input_dir}/ProsodyLabeling/000001-010000.txt", encoding='utf-8') as f: + bb_lines = f.readlines()[::2] + for l_idx, l in (enumerate([re.sub("\#\d+", "", l.split('\t')[1].strip()) for l in bb_lines])): + item_name = f'{l_idx + 1:06d}' + wav_fn = f"{input_dir}/wav/{l_idx + 1:06d}.wav" + yield {'item_name': item_name, 'wav_fn': wav_fn, 'txt': l} + +if __name__ == "__main__": + BiaobeiPreprocess().process() diff --git a/text_to_speech/egs/datasets/audio/biaobei/ps_adv.yaml b/text_to_speech/egs/datasets/audio/biaobei/ps_adv.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7b48047c0354bc8def97b4cc182c1f3ecfd8e562 --- /dev/null +++ b/text_to_speech/egs/datasets/audio/biaobei/ps_adv.yaml @@ -0,0 +1,30 @@ +base_config: + - ./ps_flow.yaml + +task_cls: tasks.tts.ps_adv.PortaSpeechAdvTask + +lambda_mel_adv: 0.05 + +disc_win_num: 3 +mel_disc_hidden_size: 128 +disc_norm: in +disc_reduction: stack +disc_interval: 1 +disc_lr: 0.0001 +disc_start_steps: 0 +discriminator_scheduler_params: + gamma: 0.5 + step_size: 40000 +discriminator_optimizer_params: + eps: 1.0e-06 + weight_decay: 0.0 + +scheduler: rsqrt +lr: 0.4 # edit from 1. + +# copied from graph-porta-speech repo +predictor_grad: 0. 
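The `scheduler: rsqrt` and `lr: 0.4` entries above use the inverse-square-root schedule common in Transformer training, which is why the base learning rate looks unusually large. The repo's actual `RSQRTSchedule` implementation is not part of this diff, so the following is only a hedged sketch of the usual formulation; the `warmup_updates` value and the `hidden_size` scaling are assumptions borrowed from typical values elsewhere in these configs:

```python
def rsqrt_lr(step, base_lr=0.4, warmup_updates=8000, hidden_size=256):
    """Sketch of an inverse-square-root LR schedule with linear warmup (not the repo's exact code)."""
    step = max(step, 1)
    warmup_factor = min(step / warmup_updates, 1.0)    # ramp up linearly during warmup
    decay_factor = max(step, warmup_updates) ** -0.5   # then decay as 1/sqrt(step)
    return base_lr * hidden_size ** -0.5 * warmup_factor * decay_factor

# Right after warmup this gives roughly 0.4 * 256**-0.5 * 8000**-0.5, i.e. about 2.8e-4.
```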
+ +max_tokens: 30000 +num_sanity_val_steps: 0 # steps of validation at the beginning +ds_workers: 1 \ No newline at end of file diff --git a/text_to_speech/egs/datasets/audio/biaobei/ps_adv_use_bert.yaml b/text_to_speech/egs/datasets/audio/biaobei/ps_adv_use_bert.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1c6090db6fa991da4f5ce604ddcb0e51a1c664ed --- /dev/null +++ b/text_to_speech/egs/datasets/audio/biaobei/ps_adv_use_bert.yaml @@ -0,0 +1,4 @@ +base_config: + - ./ps_adv.yaml + +use_bert: True diff --git a/text_to_speech/egs/datasets/audio/biaobei/ps_adv_use_bert_cl.yaml b/text_to_speech/egs/datasets/audio/biaobei/ps_adv_use_bert_cl.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8ff52d17499754b2863b2cd696a1e3fd5a9a7139 --- /dev/null +++ b/text_to_speech/egs/datasets/audio/biaobei/ps_adv_use_bert_cl.yaml @@ -0,0 +1,9 @@ +base_config: + - ./ps_adv.yaml + +use_bert: True +task_cls: tasks.tts.ps_adv_mlm.PortaSpeechAdvMLMTask + +lambda_cl: 0.01 +lambda_mlm: 0.0 +mlm_start_steps: 0 \ No newline at end of file diff --git a/text_to_speech/egs/datasets/audio/biaobei/ps_adv_use_bert_cl_v2.yaml b/text_to_speech/egs/datasets/audio/biaobei/ps_adv_use_bert_cl_v2.yaml new file mode 100644 index 0000000000000000000000000000000000000000..fe65693b74a5e3914db91fd8225cefaafa88a6a8 --- /dev/null +++ b/text_to_speech/egs/datasets/audio/biaobei/ps_adv_use_bert_cl_v2.yaml @@ -0,0 +1,11 @@ +base_config: + - ./ps_adv.yaml + +use_bert: True +task_cls: tasks.tts.ps_adv_mlm.PortaSpeechAdvMLMTask + +lambda_cl: 0.01 +lambda_mlm: 0.0 +mlm_start_steps: 0 + +cl_version: v2 \ No newline at end of file diff --git a/text_to_speech/egs/datasets/audio/biaobei/ps_adv_use_bert_cl_v3.yaml b/text_to_speech/egs/datasets/audio/biaobei/ps_adv_use_bert_cl_v3.yaml new file mode 100644 index 0000000000000000000000000000000000000000..19f3162129b7cc4f4e024fab526f51d320e13b7f --- /dev/null +++ b/text_to_speech/egs/datasets/audio/biaobei/ps_adv_use_bert_cl_v3.yaml @@ -0,0 +1,11 @@ +base_config: + - ./ps_adv.yaml + +use_bert: True +task_cls: tasks.tts.ps_adv_mlm.PortaSpeechAdvMLMTask + +lambda_cl: 0.01 +lambda_mlm: 0.0 +mlm_start_steps: 0 + +cl_version: v3 \ No newline at end of file diff --git a/text_to_speech/egs/datasets/audio/biaobei/ps_adv_use_bert_mlm.yaml b/text_to_speech/egs/datasets/audio/biaobei/ps_adv_use_bert_mlm.yaml new file mode 100644 index 0000000000000000000000000000000000000000..bb75f6b804d763a46641941755e5acc96233d0a6 --- /dev/null +++ b/text_to_speech/egs/datasets/audio/biaobei/ps_adv_use_bert_mlm.yaml @@ -0,0 +1,9 @@ +base_config: + - ./ps_adv.yaml + +use_bert: True +task_cls: tasks.tts.ps_adv_mlm.PortaSpeechAdvMLMTask + +lambda_cl: 0.0 +lambda_mlm: 0.01 +mlm_start_steps: 0 diff --git a/text_to_speech/egs/datasets/audio/biaobei/ps_flow.yaml b/text_to_speech/egs/datasets/audio/biaobei/ps_flow.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c2ff58f280a58684ce5eebdc3302ea7ace9de19d --- /dev/null +++ b/text_to_speech/egs/datasets/audio/biaobei/ps_flow.yaml @@ -0,0 +1,13 @@ +base_config: + - egs/egs_bases/tts/ps_flow.yaml + - ./base_text2mel.yaml + +max_sentences: 64 +dur_level: word +use_word_encoder: false +enc_prenet: true +enc_pre_ln: false +fvae_encoder_type: wn +fvae_decoder_type: wn +text_encoder_postnet: false +warmup_updates: 8000 \ No newline at end of file diff --git a/text_to_speech/egs/datasets/audio/biaobei/synta.yaml b/text_to_speech/egs/datasets/audio/biaobei/synta.yaml new file mode 100644 index 
0000000000000000000000000000000000000000..cbb4945a8726ac713975b598e391c26d6e090330 --- /dev/null +++ b/text_to_speech/egs/datasets/audio/biaobei/synta.yaml @@ -0,0 +1,5 @@ +base_config: + - ./ps_adv.yaml + +task_cls: tasks.tts.synta.SyntaSpeechTask +use_gae_in_prior: true \ No newline at end of file diff --git a/text_to_speech/egs/datasets/audio/biaobei/synta_use_bert.yaml b/text_to_speech/egs/datasets/audio/biaobei/synta_use_bert.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6db4543543bf29b60246ff7716526d38f4e1e94d --- /dev/null +++ b/text_to_speech/egs/datasets/audio/biaobei/synta_use_bert.yaml @@ -0,0 +1,4 @@ +base_config: + - ./synta.yaml + +use_bert: True diff --git a/text_to_speech/egs/datasets/audio/biaobei/synta_use_bert_v2.yaml b/text_to_speech/egs/datasets/audio/biaobei/synta_use_bert_v2.yaml new file mode 100644 index 0000000000000000000000000000000000000000..90bda8872d11da84d11165208bf0ce01b5b1ad32 --- /dev/null +++ b/text_to_speech/egs/datasets/audio/biaobei/synta_use_bert_v2.yaml @@ -0,0 +1,5 @@ +base_config: + - ./synta.yaml + +use_bert: True +use_gae_in_prior: false diff --git a/text_to_speech/egs/datasets/audio/biaobei/synta_v2.yaml b/text_to_speech/egs/datasets/audio/biaobei/synta_v2.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5da24f0a93d41f580935e12b26be06f68e8b36df --- /dev/null +++ b/text_to_speech/egs/datasets/audio/biaobei/synta_v2.yaml @@ -0,0 +1,5 @@ +base_config: + - ./ps_adv.yaml + +task_cls: tasks.tts.synta.SyntaSpeechTask +use_gae_in_prior: false \ No newline at end of file diff --git a/text_to_speech/egs/datasets/audio/biaobei_sing/base_text2mel.yaml b/text_to_speech/egs/datasets/audio/biaobei_sing/base_text2mel.yaml new file mode 100644 index 0000000000000000000000000000000000000000..95ecd4cfc09ef3c8260bfe1846a1657d50a67d84 --- /dev/null +++ b/text_to_speech/egs/datasets/audio/biaobei_sing/base_text2mel.yaml @@ -0,0 +1,23 @@ +base_config: egs/egs_bases/tts/base_zh.yaml +raw_data_dir: 'data/raw/biaobei' +processed_data_dir: 'data/processed/biaobei_sing' +binary_data_dir: 'data/binary/biaobei_sing' +preprocess_cls: egs.datasets.audio.biaobei.preprocess.BiaobeiPreprocess + +preprocess_args: + txt_processor: zh + use_tone: false + +ds_name: biaobei +binarization_args: + train_range: [ 871, -1 ] + test_range: [ 0, 523 ] + valid_range: [ 523, 871 ] +test_ids: [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 68, 70, 74, 87, 110, 172, 190, 215, 231, 294, + 316, 324, 402, 422, 485, 500, 505, 508, 509, 519 ] +f0_min: 80 +f0_max: 600 +vocoder_ckpt: checkpoints/hifi_biaobei +num_valid_plots: 30 \ No newline at end of file diff --git a/text_to_speech/egs/datasets/audio/biaobei_sing/fs2.yaml b/text_to_speech/egs/datasets/audio/biaobei_sing/fs2.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a821deca06b0c4b539d849f5dbbfe53dda8b0f67 --- /dev/null +++ b/text_to_speech/egs/datasets/audio/biaobei_sing/fs2.yaml @@ -0,0 +1,34 @@ +base_config: + - egs/datasets/audio/biaobei_sing/base_text2mel.yaml + - egs/egs_bases/tts/fs.yaml + +task_cls: tasks.tts.fs.FastSpeechTask +use_bert: false +use_energy_embed: false +use_pitch_embed: true + +hidden_size: 256 +ffn_hidden_size: 768 +# required by clipdataset, not use by this task +clip_mel_max_len: 128 +num_samples_in_pair: 1 +min_word_freqs: 4 # we found it filters 50% words in LJSpeech + +# training config +max_tokens: 40000 +max_sentences: 48 +scheduler: rsqrt +lr: 0.4 # edit from 1. 
+max_updates: 160000 +predictor_grad: 0.02 + +num_cache: 20000 +num_sanity_val_steps: 5 + +# pitch extractor +pitch_extractor_ckpt: checkpoints/1121_pitch_extractor/model_ckpt_steps_120000.ckpt +pe_hidden_size: 256 +pe_predictor_hidden: -1 +pe_ffn_padding: SAME +pe_predictor_kernel: 5 +pitch_norm: log \ No newline at end of file diff --git a/text_to_speech/egs/datasets/audio/biaobei_sing/preprocess.py b/text_to_speech/egs/datasets/audio/biaobei_sing/preprocess.py new file mode 100644 index 0000000000000000000000000000000000000000..44f48e22675a02e3a4b91a69caf7344f5a2982ef --- /dev/null +++ b/text_to_speech/egs/datasets/audio/biaobei_sing/preprocess.py @@ -0,0 +1,16 @@ +from data_gen.tts.base_preprocess import BasePreprocessor +import re + + +class BiaobeiPreprocess(BasePreprocessor): + def meta_data(self): + input_dir = self.raw_data_dir + with open(f"{input_dir}/ProsodyLabeling/000001-010000.txt", encoding='utf-8') as f: + bb_lines = f.readlines()[::2] + for l_idx, l in (enumerate([re.sub("\#\d+", "", l.split('\t')[1].strip()) for l in bb_lines])): + item_name = f'{l_idx + 1:06d}' + wav_fn = f"{input_dir}/wav/{l_idx + 1:06d}.wav" + yield {'item_name': item_name, 'wav_fn': wav_fn, 'txt': l} + +if __name__ == "__main__": + BiaobeiPreprocess().process() diff --git a/text_to_speech/egs/datasets/audio/gigaspeech/extract.py b/text_to_speech/egs/datasets/audio/gigaspeech/extract.py new file mode 100644 index 0000000000000000000000000000000000000000..10210b089d81065aad46a3ffcb483e0645553e84 --- /dev/null +++ b/text_to_speech/egs/datasets/audio/gigaspeech/extract.py @@ -0,0 +1,76 @@ +import os +import json +import tqdm + +from utils.commons.multiprocess_utils import multiprocess_run_tqdm +from functools import partial + +# def process_segment0(segment, opus_path, audio_out_dir, audio_id): +# segment_id = segment['sid'] +# item_name = segment_id +# begin_time = segment['begin_time'] +# end_time = segment['end_time'] +# out_wav_path = os.path.join(audio_out_dir, segment_id+'.wav') +# text = segment['text_tn'] +# text = text.replace("<COMMA>", ",") +# text = text.replace("<PERIOD>", ".") +# text = text.replace("<QUESTIONMARK>", "?") +# text = text.replace("<EXCLAMATIONPOINT>", "!") +# text = text.lower() +# item_meta = {'item_name': item_name, 'wav_fn': out_wav_path, 'txt': text, 'spk_name': audio_id} +# return item_meta + +def process_segment(segment, opus_path, audio_out_dir, audio_id): + segment_id = segment['sid'] + item_name = segment_id + begin_time = segment['begin_time'] + end_time = segment['end_time'] + out_wav_path = os.path.join(audio_out_dir, segment_id+'.wav') + if os.path.exists(out_wav_path): + return + cmd = f'ffmpeg -v quiet -y -i {opus_path} -ac 1 -ar 16000 -ss {begin_time} -to {end_time} {out_wav_path}' + os.system(cmd) + text = segment['text_tn'] + text = text.replace("<COMMA>", ",") + text = text.replace("<PERIOD>", ".") + text = text.replace("<QUESTIONMARK>", "?") + text = text.replace("<EXCLAMATIONPOINT>", "!") + text = text.lower() + item_meta = {'item_name': item_name, 'wav_fn': out_wav_path, 'txt': text, 'spk_name': audio_id} + return item_meta + +giga_root_dir = '/home/yezhenhui/datasets/raw/GigaSpeech/' +giga_out_dir = '/home/yezhenhui/datasets/raw/GigaSpeech_extract/' +os.makedirs(giga_out_dir, exist_ok=True) + +with open(f'{giga_root_dir}/GigaSpeech.json', 'r') as injson: + json_data = json.load(injson) + +meta = [] +out_meta_name = os.path.join(giga_out_dir, 'meta.json') + +audio_corpus = json_data['audios'] # list of dict, length 38131 + +args = [] +for audio_source in tqdm.tqdm(audio_corpus, total=len(audio_corpus), desc='loading the args'): + audio_id = 
audio_source['aid'] + subset = audio_source['subsets'] + audio_path = audio_source['path'] + opus_path = os.path.join(giga_root_dir, audio_path) + audio_out_dir = os.path.join(giga_out_dir, os.path.dirname(audio_path), audio_id) + os.makedirs(audio_out_dir, exist_ok=True) + segments = audio_source['segments'] + spk_name = audio_id + args += [{'segment': segment, 'opus_path': opus_path, 'audio_out_dir': audio_out_dir, 'audio_id': audio_id} for segment in segments] + +# for segment_meta in multiprocess_run_tqdm(process_segment0, args, desc='extracting...'): +# meta += segment_meta + +# with open(out_meta_name, 'w') as f: +# json.dump(meta, f) +# print("successful!") + +for segment_meta in multiprocess_run_tqdm(process_segment, args, num_workers=32, desc='extracting...'): + pass + + diff --git a/text_to_speech/egs/datasets/audio/gigaspeech/preprocess.py b/text_to_speech/egs/datasets/audio/gigaspeech/preprocess.py new file mode 100644 index 0000000000000000000000000000000000000000..f1a286cf3afe843b889f091441f08b441f083572 --- /dev/null +++ b/text_to_speech/egs/datasets/audio/gigaspeech/preprocess.py @@ -0,0 +1,25 @@ +from data_gen.tts.base_preprocess import BasePreprocessor +import glob, os + +class GigaSpeechPreprocess(BasePreprocessor): + def meta_data(self): + lj_raw_data_dir = 'data/raw/LJSpeech-1.1' + for l in list(open(f'{lj_raw_data_dir}/metadata.csv').readlines())[600:]: + item_name, _, txt = l.strip().split("|") + wav_fn = f"{lj_raw_data_dir}/wavs/{item_name}.wav" + txt = txt.lower() + yield {'item_name': item_name, 'wav_fn': wav_fn, 'txt': txt, 'spk_name': 'LJSPK'} + + dirs = sorted(glob.glob(f'{self.raw_data_dir}/*/*/*')) + for d in dirs: + txt_fn = glob.glob(f'{d}/*.txt')[0] + with open(txt_fn, 'r') as f: + item_name2txt = [l.strip().split(" ") for l in f.readlines()] + item_name2txt = {x[0]: ' '.join(x[1:]) for x in item_name2txt} + wav_fns = sorted(glob.glob(f'{d}/*.flac')) + for wav_fn in wav_fns: + item_name = os.path.basename(wav_fn)[:-5] + txt = item_name2txt[item_name].lower() + spk = item_name.split("-")[0] + yield {'item_name': item_name, 'wav_fn': wav_fn, 'txt': txt, 'spk_name': spk} + \ No newline at end of file diff --git a/text_to_speech/egs/datasets/audio/librispeech/base_text2mel.yaml b/text_to_speech/egs/datasets/audio/librispeech/base_text2mel.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5cb83e4b677e5f081501a3cde3546dabc1e72ee4 --- /dev/null +++ b/text_to_speech/egs/datasets/audio/librispeech/base_text2mel.yaml @@ -0,0 +1,22 @@ +base_config: egs/egs_bases/tts/base.yaml +ds_name: librispeech +raw_data_dir: 'data/raw/LibriSpeech' +processed_data_dir: 'data/processed/librispeech' +binary_data_dir: 'data/binary/librispeech' +preprocess_cls: egs.datasets.audio.librispeech.preprocess.LibriSpeechPreprocess + +binarization_args: + train_range: [ 871, -1 ] + test_range: [ 0, 523 ] + valid_range: [ 523, 871 ] +test_ids: [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 68, 70, 74, 87, 110, 172, 190, 215, 231, 294, + 316, 324, 402, 422, 485, 500, 505, 508, 509, 519 ] +f0_min: 80 +f0_max: 600 +vocoder_ckpt: checkpoints/hifi_librispeech +num_valid_plots: 30 + +max_frames: 3000 +num_spk: 999999 \ No newline at end of file diff --git a/text_to_speech/egs/datasets/audio/librispeech/preprocess.py b/text_to_speech/egs/datasets/audio/librispeech/preprocess.py new file mode 100644 index 0000000000000000000000000000000000000000..a12360f374f2343632667e970cc1ebf13dca0c30 --- /dev/null +++ 
b/text_to_speech/egs/datasets/audio/librispeech/preprocess.py @@ -0,0 +1,26 @@ +from data_gen.tts.base_preprocess import BasePreprocessor +import glob, os + +class LibriSpeechPreprocess(BasePreprocessor): + + def meta_data(self): + lj_raw_data_dir = 'data/raw/LJSpeech-1.1' + for l in list(open(f'{lj_raw_data_dir}/metadata.csv').readlines())[600:]: + item_name, _, txt = l.strip().split("|") + wav_fn = f"{lj_raw_data_dir}/wavs/{item_name}.wav" + txt = txt.lower() + yield {'item_name': item_name, 'wav_fn': wav_fn, 'txt': txt, 'spk_name': 'LJSPK'} + + dirs = sorted(glob.glob(f'{self.raw_data_dir}/*/*/*')) + for d in dirs: + txt_fn = glob.glob(f'{d}/*.txt')[0] + with open(txt_fn, 'r') as f: + item_name2txt = [l.strip().split(" ") for l in f.readlines()] + item_name2txt = {x[0]: ' '.join(x[1:]) for x in item_name2txt} + wav_fns = sorted(glob.glob(f'{d}/*.flac')) + for wav_fn in wav_fns: + item_name = os.path.basename(wav_fn)[:-5] + txt = item_name2txt[item_name].lower() + spk = item_name.split("-")[0] + yield {'item_name': item_name, 'wav_fn': wav_fn, 'txt': txt, 'spk_name': spk} + \ No newline at end of file diff --git a/text_to_speech/egs/datasets/audio/libritts/base_text2mel.yaml b/text_to_speech/egs/datasets/audio/libritts/base_text2mel.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f0b7f5e48eb0571bb2db90026427df40a1ec5b25 --- /dev/null +++ b/text_to_speech/egs/datasets/audio/libritts/base_text2mel.yaml @@ -0,0 +1,25 @@ +ds_name: libritts +base_config: egs/egs_bases/tts/base.yaml +raw_data_dir: 'data/raw/LibriTTS' +processed_data_dir: 'data/processed/libritts' +binary_data_dir: 'data/binary/libritts' +preprocess_cls: egs.datasets.audio.libritts.preprocess.LibriTTSPreprocess +binarization_args: + train_range: [ 871, -1 ] + test_range: [ 0, 523 ] + valid_range: [ 523, 871 ] + shuffle: true + with_spk_id: true + with_spk_embed: false +test_ids: [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 68, 70, 74, 87, 110, 172, 190, 215, 231, 294, + 316, 324, 402, 422, 485, 500, 505, 508, 509, 519 ] +f0_min: 80 +f0_max: 600 +vocoder: PWG +vocoder_ckpt: checkpoints/pwg_libritts +num_spk: 2000 +use_spk_id: true + +max_frames: 30000 \ No newline at end of file diff --git a/text_to_speech/egs/datasets/audio/libritts/ds.yaml b/text_to_speech/egs/datasets/audio/libritts/ds.yaml new file mode 100644 index 0000000000000000000000000000000000000000..cb9ece548caa5741250e66e578d9e8b5eee46353 --- /dev/null +++ b/text_to_speech/egs/datasets/audio/libritts/ds.yaml @@ -0,0 +1,32 @@ +base_config: + - research/clipspeech/egs/clipspeech_tts/lj/fs2_adv_baseline.yaml + - egs/datasets/audio/libritts/base_text2mel.yaml + +binary_data_dir: 'data/binary/libritts_clipspeech' +num_cache: 0 + +fs2_ckpt: checkpoints/1204_libritts/fs2_adv_baseline/model_ckpt_steps_160000.ckpt + +# spec_min and spec_max are calculated on the training set. 
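The comment above only states that the per-bin statistics come from the training set; the script that computes them is not included in this diff. A minimal sketch of the usual procedure, taking the per-mel-bin min and max over all training mel-spectrograms and later using them to squash mels into [-1, 1] for the diffusion decoder, is shown below; the `.npy` file layout and the 80-bin shape are assumptions:

```python
import glob
import numpy as np

def compute_spec_stats(mel_dir, num_mel_bins=80):
    """Per-bin min/max over training log-mel spectrograms (sketch; assumes .npy files of shape [T, num_mel_bins])."""
    spec_min = np.full(num_mel_bins, np.inf)
    spec_max = np.full(num_mel_bins, -np.inf)
    for fn in glob.glob(f'{mel_dir}/*.npy'):
        mel = np.load(fn)                                  # [T, num_mel_bins]
        spec_min = np.minimum(spec_min, mel.min(axis=0))
        spec_max = np.maximum(spec_max, mel.max(axis=0))
    return spec_min, spec_max

def norm_spec(mel, spec_min, spec_max):
    # map each bin linearly to [-1, 1], matching how spec_min/spec_max are typically consumed
    return (mel - spec_min) / (spec_max - spec_min) * 2 - 1
```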
+spec_min: [ -4.7574, -4.6783, -4.6431, -4.5832, -4.5390, -4.6771, -4.8089, -4.7672, + -4.5784, -4.7755, -4.7150, -4.8919, -4.8271, -4.7389, -4.6047, -4.7759, + -4.6799, -4.8201, -4.7823, -4.8262, -4.7857, -4.7545, -4.9358, -4.9733, + -5.1134, -5.1395, -4.9016, -4.8434, -5.0189, -4.8460, -5.0529, -4.9510, + -5.0217, -5.0049, -5.1831, -5.1445, -5.1015, -5.0281, -4.9887, -4.9916, + -4.9785, -4.9071, -4.9488, -5.0342, -4.9332, -5.0650, -4.8924, -5.0875, + -5.0483, -5.0848, -5.0655, -5.0279, -5.0015, -5.0792, -5.0636, -5.2413, + -5.1421, -5.1710, -5.3256, -5.0511, -5.1186, -5.0057, -5.0446, -5.1173, + -5.0325, -5.1085, -5.0053, -5.0755, -5.1176, -5.1004, -5.2153, -5.2757, + -5.3025, -5.2867, -5.2918, -5.3328, -5.2731, -5.2985, -5.2400, -5.2211 ] +spec_max: [ -0.5982, -0.0778, 0.1205, 0.2747, 0.4657, 0.5123, 0.5830, 0.7093, + 0.6461, 0.6101, 0.7316, 0.7715, 0.7681, 0.8349, 0.7815, 0.7591, + 0.7910, 0.7433, 0.7352, 0.6869, 0.6854, 0.6623, 0.5353, 0.6492, + 0.6909, 0.6106, 0.5761, 0.5236, 0.5638, 0.4054, 0.4545, 0.3407, + 0.3037, 0.3380, 0.1599, 0.1603, 0.2741, 0.2130, 0.1569, 0.1911, + 0.2324, 0.1586, 0.1221, 0.0341, -0.0558, 0.0553, -0.1153, -0.0933, + -0.1171, -0.0050, -0.1519, -0.1629, -0.0522, -0.0739, -0.2069, -0.2405, + -0.1244, -0.2582, -0.1361, -0.1575, -0.1442, 0.0513, -0.1567, -0.2000, + 0.0086, -0.0698, 0.1385, 0.0941, 0.1864, 0.1225, 0.1389, 0.1382, + 0.1670, 0.1007, 0.1444, 0.0888, 0.1998, 0.2280, 0.2932, 0.3047 ] + +max_tokens: 30000 \ No newline at end of file diff --git a/text_to_speech/egs/datasets/audio/libritts/preprocess.py b/text_to_speech/egs/datasets/audio/libritts/preprocess.py new file mode 100644 index 0000000000000000000000000000000000000000..cdb6c7322de4a62e23dd586bee3ea145d2bc5f58 --- /dev/null +++ b/text_to_speech/egs/datasets/audio/libritts/preprocess.py @@ -0,0 +1,13 @@ +from data_gen.tts.base_preprocess import BasePreprocessor +import glob, os + +class LibriTTSPreprocess(BasePreprocessor): + def meta_data(self): + wav_fns = sorted(glob.glob(f'{self.raw_data_dir}/*/*/*/*.wav')) + for wav_fn in wav_fns: + item_name = os.path.basename(wav_fn)[:-4] + txt_fn = f'{wav_fn[:-4]}.normalized.txt' + with open(txt_fn, 'r') as f: + txt = f.read() + spk_name = item_name.split("_")[0] + yield {'item_name': item_name, 'wav_fn': wav_fn, 'txt': txt, 'spk_name': spk_name} \ No newline at end of file diff --git a/text_to_speech/egs/datasets/audio/libritts/ps_flow.yaml b/text_to_speech/egs/datasets/audio/libritts/ps_flow.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9e78e54f845104351a1bc7486d881856f243307b --- /dev/null +++ b/text_to_speech/egs/datasets/audio/libritts/ps_flow.yaml @@ -0,0 +1,3 @@ +base_config: + - egs/egs_bases/tts/ps_flow.yaml + - ./base_text2mel.yaml \ No newline at end of file diff --git a/text_to_speech/egs/datasets/audio/libritts/synta.yaml b/text_to_speech/egs/datasets/audio/libritts/synta.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e65bcbc7e584ee35bce17ae7337ceff945ab0df1 --- /dev/null +++ b/text_to_speech/egs/datasets/audio/libritts/synta.yaml @@ -0,0 +1,19 @@ +base_config: + - egs/egs_bases/tts/synta.yaml + - ./base_text2mel.yaml + +lambda_mel_adv: 0.05 + +disc_win_num: 3 +mel_disc_hidden_size: 128 +disc_norm: in +disc_reduction: stack +disc_interval: 1 +disc_lr: 0.0001 +disc_start_steps: 0 +discriminator_scheduler_params: + gamma: 0.5 + step_size: 40000 +discriminator_optimizer_params: + eps: 1.0e-06 + weight_decay: 0.0 \ No newline at end of file diff --git 
a/text_to_speech/egs/datasets/audio/lj/__pycache__/preprocess.cpython-38.pyc b/text_to_speech/egs/datasets/audio/lj/__pycache__/preprocess.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4cc0070ac988fdf76d70fb6846852e13cfad8070 Binary files /dev/null and b/text_to_speech/egs/datasets/audio/lj/__pycache__/preprocess.cpython-38.pyc differ diff --git a/text_to_speech/egs/datasets/audio/lj/base_mel2wav.yaml b/text_to_speech/egs/datasets/audio/lj/base_mel2wav.yaml new file mode 100644 index 0000000000000000000000000000000000000000..626745c25ef7fc2254a778dc7297a33a11142694 --- /dev/null +++ b/text_to_speech/egs/datasets/audio/lj/base_mel2wav.yaml @@ -0,0 +1,4 @@ +base_config: egs/egs_bases/tts/vocoder/base.yaml +raw_data_dir: 'data/raw/LJSpeech-1.1' +processed_data_dir: 'data/processed/ljspeech' +binary_data_dir: 'data/binary/ljspeech_wav' diff --git a/text_to_speech/egs/datasets/audio/lj/base_text2mel.yaml b/text_to_speech/egs/datasets/audio/lj/base_text2mel.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3efe587567493b4ebf8f5da2a957662a7e16e2a0 --- /dev/null +++ b/text_to_speech/egs/datasets/audio/lj/base_text2mel.yaml @@ -0,0 +1,18 @@ +ds_name: ljspeech +base_config: egs/egs_bases/tts/base.yaml +raw_data_dir: 'data/raw/LJSpeech-1.1' +processed_data_dir: 'data/processed/ljspeech' +binary_data_dir: 'data/binary/ljspeech' +preprocess_cls: egs.datasets.audio.lj.preprocess.LJPreprocess +binarization_args: + train_range: [ 871, -1 ] + test_range: [ 0, 523 ] + valid_range: [ 523, 871 ] +test_ids: [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 68, 70, 74, 87, 110, 172, 190, 215, 231, 294, + 316, 324, 402, 422, 485, 500, 505, 508, 509, 519 ] +f0_min: 80 +f0_max: 600 +vocoder_ckpt: checkpoints/hifi_lj +num_valid_plots: 30 \ No newline at end of file diff --git a/text_to_speech/egs/datasets/audio/lj/ds.yaml b/text_to_speech/egs/datasets/audio/lj/ds.yaml new file mode 100644 index 0000000000000000000000000000000000000000..18cce49ae65e03497acdc471947cb3cda690ce3b --- /dev/null +++ b/text_to_speech/egs/datasets/audio/lj/ds.yaml @@ -0,0 +1,30 @@ +base_config: + - ./fs_adv.yaml + - egs/egs_bases/tts/ds.yaml + + +fs2_ckpt: checkpoints/1130_lj/fs2_adv_baseline/model_ckpt_steps_160000.ckpt + +# spec_min and spec_max are calculated on the training set. 
+spec_min: [ -4.7574, -4.6783, -4.6431, -4.5832, -4.5390, -4.6771, -4.8089, -4.7672, + -4.5784, -4.7755, -4.7150, -4.8919, -4.8271, -4.7389, -4.6047, -4.7759, + -4.6799, -4.8201, -4.7823, -4.8262, -4.7857, -4.7545, -4.9358, -4.9733, + -5.1134, -5.1395, -4.9016, -4.8434, -5.0189, -4.8460, -5.0529, -4.9510, + -5.0217, -5.0049, -5.1831, -5.1445, -5.1015, -5.0281, -4.9887, -4.9916, + -4.9785, -4.9071, -4.9488, -5.0342, -4.9332, -5.0650, -4.8924, -5.0875, + -5.0483, -5.0848, -5.0655, -5.0279, -5.0015, -5.0792, -5.0636, -5.2413, + -5.1421, -5.1710, -5.3256, -5.0511, -5.1186, -5.0057, -5.0446, -5.1173, + -5.0325, -5.1085, -5.0053, -5.0755, -5.1176, -5.1004, -5.2153, -5.2757, + -5.3025, -5.2867, -5.2918, -5.3328, -5.2731, -5.2985, -5.2400, -5.2211 ] +spec_max: [ -0.5982, -0.0778, 0.1205, 0.2747, 0.4657, 0.5123, 0.5830, 0.7093, + 0.6461, 0.6101, 0.7316, 0.7715, 0.7681, 0.8349, 0.7815, 0.7591, + 0.7910, 0.7433, 0.7352, 0.6869, 0.6854, 0.6623, 0.5353, 0.6492, + 0.6909, 0.6106, 0.5761, 0.5236, 0.5638, 0.4054, 0.4545, 0.3407, + 0.3037, 0.3380, 0.1599, 0.1603, 0.2741, 0.2130, 0.1569, 0.1911, + 0.2324, 0.1586, 0.1221, 0.0341, -0.0558, 0.0553, -0.1153, -0.0933, + -0.1171, -0.0050, -0.1519, -0.1629, -0.0522, -0.0739, -0.2069, -0.2405, + -0.1244, -0.2582, -0.1361, -0.1575, -0.1442, 0.0513, -0.1567, -0.2000, + 0.0086, -0.0698, 0.1385, 0.0941, 0.1864, 0.1225, 0.1389, 0.1382, + 0.1670, 0.1007, 0.1444, 0.0888, 0.1998, 0.2280, 0.2932, 0.3047 ] + +max_tokens: 30000 \ No newline at end of file diff --git a/text_to_speech/egs/datasets/audio/lj/fs.yaml b/text_to_speech/egs/datasets/audio/lj/fs.yaml new file mode 100644 index 0000000000000000000000000000000000000000..22bc563d16aa3c6b2c03a7559afc9c800aa85f27 --- /dev/null +++ b/text_to_speech/egs/datasets/audio/lj/fs.yaml @@ -0,0 +1,3 @@ +base_config: + - egs/egs_bases/tts/fs.yaml + - ./base_text2mel.yaml diff --git a/text_to_speech/egs/datasets/audio/lj/fs2_orig.yaml b/text_to_speech/egs/datasets/audio/lj/fs2_orig.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f872891409c6c3b745f17909e3ed4d2540a0e2f3 --- /dev/null +++ b/text_to_speech/egs/datasets/audio/lj/fs2_orig.yaml @@ -0,0 +1,4 @@ +base_config: + - egs/egs_bases/tts/fs2_orig.yaml + - ./base_text2mel.yaml +binary_data_dir: 'data/binary/ljspeech_cwt' \ No newline at end of file diff --git a/text_to_speech/egs/datasets/audio/lj/fs_adv.yaml b/text_to_speech/egs/datasets/audio/lj/fs_adv.yaml new file mode 100644 index 0000000000000000000000000000000000000000..081ca3bacd1f161e05e4aa298036e7a4fe23c38d --- /dev/null +++ b/text_to_speech/egs/datasets/audio/lj/fs_adv.yaml @@ -0,0 +1,3 @@ +base_config: + - egs/egs_bases/tts/fs_adv.yaml + - ./base_text2mel.yaml diff --git a/text_to_speech/egs/datasets/audio/lj/fs_adv_use_bert.yaml b/text_to_speech/egs/datasets/audio/lj/fs_adv_use_bert.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f2a61227f9a53d28b620ba1ea7c7610c395fc28d --- /dev/null +++ b/text_to_speech/egs/datasets/audio/lj/fs_adv_use_bert.yaml @@ -0,0 +1,10 @@ +base_config: + - ./fs_adv.yaml + +use_pitch_embed: false + +use_bert: true +grad_bert: 0.1 +load_bert_from_pretrained: true +bert_trainable_start_block: 0 +bert_num_blocks: 12 \ No newline at end of file diff --git a/text_to_speech/egs/datasets/audio/lj/hifigan.yaml b/text_to_speech/egs/datasets/audio/lj/hifigan.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5e840cd2bc5b1c3860f55bb1e36b3be1b41cc9e2 --- /dev/null +++ b/text_to_speech/egs/datasets/audio/lj/hifigan.yaml @@ -0,0 +1,3 @@ 
+base_config: + - egs/egs_bases/tts/vocoder/hifigan.yaml + - ./base_mel2wav.yaml \ No newline at end of file diff --git a/text_to_speech/egs/datasets/audio/lj/preprocess.py b/text_to_speech/egs/datasets/audio/lj/preprocess.py new file mode 100644 index 0000000000000000000000000000000000000000..a3aa6b5a91fbfde53af0d2d43748d439399ca307 --- /dev/null +++ b/text_to_speech/egs/datasets/audio/lj/preprocess.py @@ -0,0 +1,9 @@ +from text_to_speech.data_gen.tts.base_preprocess import BasePreprocessor + + +class LJPreprocess(BasePreprocessor): + def meta_data(self): + for l in open(f'{self.raw_data_dir}/metadata.csv').readlines(): + item_name, _, txt = l.strip().split("|") + wav_fn = f"{self.raw_data_dir}/wavs/{item_name}.wav" + yield {'item_name': item_name, 'wav_fn': wav_fn, 'txt': txt} diff --git a/text_to_speech/egs/datasets/audio/lj/ps_adv.yaml b/text_to_speech/egs/datasets/audio/lj/ps_adv.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1893ce3ce881f064b784ff497f7d1837fe3b73b3 --- /dev/null +++ b/text_to_speech/egs/datasets/audio/lj/ps_adv.yaml @@ -0,0 +1,30 @@ +base_config: + - ./ps_flow.yaml + +task_cls: tasks.tts.ps_adv.PortaSpeechAdvTask + +lambda_mel_adv: 0.05 + +disc_win_num: 3 +mel_disc_hidden_size: 128 +disc_norm: in +disc_reduction: stack +disc_interval: 1 +disc_lr: 0.0001 +disc_start_steps: 0 +discriminator_scheduler_params: + gamma: 0.5 + step_size: 40000 +discriminator_optimizer_params: + eps: 1.0e-06 + weight_decay: 0.0 + +scheduler: rsqrt +lr: 0.4 # edit from 1. + +predictor_grad: 0.02 + +max_tokens: 30000 +num_sanity_val_steps: 0 # steps of validation at the beginning +ds_workers: 1 + diff --git a/text_to_speech/egs/datasets/audio/lj/ps_adv_use_bert.yaml b/text_to_speech/egs/datasets/audio/lj/ps_adv_use_bert.yaml new file mode 100644 index 0000000000000000000000000000000000000000..10b458353a1ee61715b44140afd6b6dd851b2dd7 --- /dev/null +++ b/text_to_speech/egs/datasets/audio/lj/ps_adv_use_bert.yaml @@ -0,0 +1,8 @@ +base_config: + - ./ps_adv.yaml + +use_bert: true +grad_bert: 0.1 +load_bert_from_pretrained: true +bert_trainable_start_block: 0 +bert_num_blocks: 12 \ No newline at end of file diff --git a/text_to_speech/egs/datasets/audio/lj/ps_adv_use_bert_cl.yaml b/text_to_speech/egs/datasets/audio/lj/ps_adv_use_bert_cl.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8ff52d17499754b2863b2cd696a1e3fd5a9a7139 --- /dev/null +++ b/text_to_speech/egs/datasets/audio/lj/ps_adv_use_bert_cl.yaml @@ -0,0 +1,9 @@ +base_config: + - ./ps_adv.yaml + +use_bert: True +task_cls: tasks.tts.ps_adv_mlm.PortaSpeechAdvMLMTask + +lambda_cl: 0.01 +lambda_mlm: 0.0 +mlm_start_steps: 0 \ No newline at end of file diff --git a/text_to_speech/egs/datasets/audio/lj/ps_adv_use_bert_cl_v2.yaml b/text_to_speech/egs/datasets/audio/lj/ps_adv_use_bert_cl_v2.yaml new file mode 100644 index 0000000000000000000000000000000000000000..fe65693b74a5e3914db91fd8225cefaafa88a6a8 --- /dev/null +++ b/text_to_speech/egs/datasets/audio/lj/ps_adv_use_bert_cl_v2.yaml @@ -0,0 +1,11 @@ +base_config: + - ./ps_adv.yaml + +use_bert: True +task_cls: tasks.tts.ps_adv_mlm.PortaSpeechAdvMLMTask + +lambda_cl: 0.01 +lambda_mlm: 0.0 +mlm_start_steps: 0 + +cl_version: v2 \ No newline at end of file diff --git a/text_to_speech/egs/datasets/audio/lj/ps_adv_use_bert_cl_v3.yaml b/text_to_speech/egs/datasets/audio/lj/ps_adv_use_bert_cl_v3.yaml new file mode 100644 index 0000000000000000000000000000000000000000..19f3162129b7cc4f4e024fab526f51d320e13b7f --- /dev/null +++ 
b/text_to_speech/egs/datasets/audio/lj/ps_adv_use_bert_cl_v3.yaml @@ -0,0 +1,11 @@ +base_config: + - ./ps_adv.yaml + +use_bert: True +task_cls: tasks.tts.ps_adv_mlm.PortaSpeechAdvMLMTask + +lambda_cl: 0.01 +lambda_mlm: 0.0 +mlm_start_steps: 0 + +cl_version: v3 \ No newline at end of file diff --git a/text_to_speech/egs/datasets/audio/lj/ps_adv_use_bert_cl_v4.yaml b/text_to_speech/egs/datasets/audio/lj/ps_adv_use_bert_cl_v4.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6b07ff0b99e72328c0c4e26d37dc424100a0a691 --- /dev/null +++ b/text_to_speech/egs/datasets/audio/lj/ps_adv_use_bert_cl_v4.yaml @@ -0,0 +1,12 @@ +base_config: + - ./ps_adv.yaml + +use_bert: True +task_cls: tasks.tts.ps_adv_mlm.PortaSpeechAdvMLMTask + +lambda_cl: 0.01 +lambda_mlm: 0.0 +mlm_start_steps: 0 + +cl_version: v4 +cl_ds_name: wiki \ No newline at end of file diff --git a/text_to_speech/egs/datasets/audio/lj/ps_adv_use_bert_cl_v5.yaml b/text_to_speech/egs/datasets/audio/lj/ps_adv_use_bert_cl_v5.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a7530719320e4d412d27cb5b776f1646f4ac416e --- /dev/null +++ b/text_to_speech/egs/datasets/audio/lj/ps_adv_use_bert_cl_v5.yaml @@ -0,0 +1,12 @@ +base_config: + - ./ps_adv.yaml + +use_bert: True +task_cls: tasks.tts.ps_adv_mlm.PortaSpeechAdvMLMTask + +lambda_cl: 0.01 +lambda_mlm: 0.0 +mlm_start_steps: 0 + +cl_version: v5 +cl_ds_name: nli \ No newline at end of file diff --git a/text_to_speech/egs/datasets/audio/lj/ps_adv_use_bert_mlm.yaml b/text_to_speech/egs/datasets/audio/lj/ps_adv_use_bert_mlm.yaml new file mode 100644 index 0000000000000000000000000000000000000000..bb75f6b804d763a46641941755e5acc96233d0a6 --- /dev/null +++ b/text_to_speech/egs/datasets/audio/lj/ps_adv_use_bert_mlm.yaml @@ -0,0 +1,9 @@ +base_config: + - ./ps_adv.yaml + +use_bert: True +task_cls: tasks.tts.ps_adv_mlm.PortaSpeechAdvMLMTask + +lambda_cl: 0.0 +lambda_mlm: 0.01 +mlm_start_steps: 0 diff --git a/text_to_speech/egs/datasets/audio/lj/ps_adv_use_bert_mlm_v2.yaml b/text_to_speech/egs/datasets/audio/lj/ps_adv_use_bert_mlm_v2.yaml new file mode 100644 index 0000000000000000000000000000000000000000..deef662e68e9921eb21c46dc99cd819cc57e47b6 --- /dev/null +++ b/text_to_speech/egs/datasets/audio/lj/ps_adv_use_bert_mlm_v2.yaml @@ -0,0 +1,11 @@ +base_config: + - ./ps_adv.yaml + +use_bert: True +task_cls: tasks.tts.ps_adv_mlm.PortaSpeechAdvMLMTask + +lambda_cl: 0.0 +lambda_mlm: 0.01 +mlm_start_steps: 0 + +cl_ds_name: wiki \ No newline at end of file diff --git a/text_to_speech/egs/datasets/audio/lj/ps_flow.yaml b/text_to_speech/egs/datasets/audio/lj/ps_flow.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c2ff58f280a58684ce5eebdc3302ea7ace9de19d --- /dev/null +++ b/text_to_speech/egs/datasets/audio/lj/ps_flow.yaml @@ -0,0 +1,13 @@ +base_config: + - egs/egs_bases/tts/ps_flow.yaml + - ./base_text2mel.yaml + +max_sentences: 64 +dur_level: word +use_word_encoder: false +enc_prenet: true +enc_pre_ln: false +fvae_encoder_type: wn +fvae_decoder_type: wn +text_encoder_postnet: false +warmup_updates: 8000 \ No newline at end of file diff --git a/text_to_speech/egs/datasets/audio/lj/ps_flow_small.yaml b/text_to_speech/egs/datasets/audio/lj/ps_flow_small.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9560428297cf4073ebd581fbf82ea6e54054bfe4 --- /dev/null +++ b/text_to_speech/egs/datasets/audio/lj/ps_flow_small.yaml @@ -0,0 +1,13 @@ +base_config: + - egs/egs_bases/tts/ps_flow_small.yaml + - ./base_text2mel.yaml + +max_sentences: 
128 +dur_level: word +use_word_encoder: false +enc_prenet: true +enc_pre_ln: false +fvae_encoder_type: wn +fvae_decoder_type: wn +text_encoder_postnet: false +warmup_updates: 8000 \ No newline at end of file diff --git a/text_to_speech/egs/datasets/audio/lj/synta.yaml b/text_to_speech/egs/datasets/audio/lj/synta.yaml new file mode 100644 index 0000000000000000000000000000000000000000..64f892ea99c4dfa1e6daaa0aaabfe1ad01e5900c --- /dev/null +++ b/text_to_speech/egs/datasets/audio/lj/synta.yaml @@ -0,0 +1,4 @@ +base_config: + - ./ps_adv.yaml + +task_cls: tasks.tts.synta.SyntaSpeechTask diff --git a/text_to_speech/egs/datasets/audio/lj/synta_use_bert.yaml b/text_to_speech/egs/datasets/audio/lj/synta_use_bert.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6db4543543bf29b60246ff7716526d38f4e1e94d --- /dev/null +++ b/text_to_speech/egs/datasets/audio/lj/synta_use_bert.yaml @@ -0,0 +1,4 @@ +base_config: + - ./synta.yaml + +use_bert: True diff --git a/text_to_speech/egs/datasets/audio/wenetspeech/base_mel2wav.yaml b/text_to_speech/egs/datasets/audio/wenetspeech/base_mel2wav.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9d14340c2c0571b4f9e4b59e7fbc004892d41d26 --- /dev/null +++ b/text_to_speech/egs/datasets/audio/wenetspeech/base_mel2wav.yaml @@ -0,0 +1,21 @@ +base_config: + - egs/egs_bases/tts/base_zh.yaml + +raw_data_dir: 'data/raw/AISHELL-3' +processed_data_dir: 'data/processed/aishell3' +binary_data_dir: 'data/binary/aishell3_fs2s' + +audio_sample_rate: 16000 +fmin: 0 +fmax: 8000 +pre_align_cls: egs.datasets.audio.aishell3.pre_align.AiShell3PreAlign + +use_spk_id: true +num_spk: 220 +# test_num: 200 +binarization_args: + shuffle: true + trim_eos_bos: true +use_pitch_embed: false +vocoder: vocoders.hifigan.HifiGAN +vocoder_ckpt: 'pretrained/0707_hifigan_as3_ori_1' \ No newline at end of file diff --git a/text_to_speech/egs/datasets/audio/wenetspeech/base_text2mel.yaml b/text_to_speech/egs/datasets/audio/wenetspeech/base_text2mel.yaml new file mode 100644 index 0000000000000000000000000000000000000000..83b34320a83b54d19664b8fa99d85e1112d72d65 --- /dev/null +++ b/text_to_speech/egs/datasets/audio/wenetspeech/base_text2mel.yaml @@ -0,0 +1,21 @@ +base_config: + - egs/egs_bases/tts/base_zh.yaml + +ds_name: wenetspeech +raw_data_dir: 'data/raw/WenetSpeech' +processed_data_dir: 'data/processed/wenetspeech' +binary_data_dir: 'data/binary/wenetspeech' +preprocess_cls: egs.datasets.audio.wenetspeech.preprocess.WenetSpeechPreprocess + +use_spk_id: false +use_spk_embed: true +test_num: 200 +binarization_args: + trim_eos_bos: false + with_spk_embed: false + +# use_pitch_embed: false +vocoder: vocoders.hifigan.HifiGAN +vocoder_ckpt: 'pretrained/hifigan_hifitts' + +language: zh \ No newline at end of file diff --git a/text_to_speech/egs/datasets/audio/wenetspeech/conformer_asr.yaml b/text_to_speech/egs/datasets/audio/wenetspeech/conformer_asr.yaml new file mode 100644 index 0000000000000000000000000000000000000000..506bc026588905cb5389a59e1d62d75a4e7a1939 --- /dev/null +++ b/text_to_speech/egs/datasets/audio/wenetspeech/conformer_asr.yaml @@ -0,0 +1,5 @@ +base_config: egs/egs_bases/asr/base.yaml +raw_data_dir: 'data/raw/WenetSpeech' +processed_data_dir: 'data/processed/wenetspeech' +binary_data_dir: 'data/binary/wenetspeech' +pre_align_cls: egs.datasets.audio.wenetspeech.pre_align.WenetSpeechPreAlign \ No newline at end of file diff --git a/text_to_speech/egs/datasets/audio/wenetspeech/conformer_ctc.yaml 
b/text_to_speech/egs/datasets/audio/wenetspeech/conformer_ctc.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3f40156d44f74541145f81928bcdebe85d882bcc --- /dev/null +++ b/text_to_speech/egs/datasets/audio/wenetspeech/conformer_ctc.yaml @@ -0,0 +1,6 @@ +base_config: ./conformer_asr.yaml +task_cls: tasks.asr.transformer_ctc.TransformerCTCTask +max_tokens: 40000 +use_phone: True + +binarizer_cls: data_gen.tts.binarizer_zh.ZhBinarizer \ No newline at end of file diff --git a/text_to_speech/egs/datasets/audio/wenetspeech/fs2.yaml b/text_to_speech/egs/datasets/audio/wenetspeech/fs2.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6b3477f063c8993a7c630c9cdf164ecbbe4997c9 --- /dev/null +++ b/text_to_speech/egs/datasets/audio/wenetspeech/fs2.yaml @@ -0,0 +1,3 @@ +base_config: + - egs/egs_bases/tts/fs2.yaml + - ./base_text2mel.yaml \ No newline at end of file diff --git a/text_to_speech/egs/datasets/audio/wenetspeech/fs2_adv.yaml b/text_to_speech/egs/datasets/audio/wenetspeech/fs2_adv.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7abd4e7eb2babb6a6e0bf963839ae13a7c1c3047 --- /dev/null +++ b/text_to_speech/egs/datasets/audio/wenetspeech/fs2_adv.yaml @@ -0,0 +1,3 @@ +base_config: + - egs/egs_bases/tts/fs2_adv.yaml + - ./base_text2mel.yaml \ No newline at end of file diff --git a/text_to_speech/egs/datasets/audio/wenetspeech/fs2s.yaml b/text_to_speech/egs/datasets/audio/wenetspeech/fs2s.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d5946f9f43185bbc9c90a766cf34c713f28ad297 --- /dev/null +++ b/text_to_speech/egs/datasets/audio/wenetspeech/fs2s.yaml @@ -0,0 +1,30 @@ +base_config: + - egs/egs_bases/tts/fs2s.yaml + - ./base_mel2wav.yaml +binary_data_dir: 'data/binary/aishell3_fs2s' +binarization_args: + with_wav: true + with_align: true + with_word: true + with_txt: true + reset_phone_dict: true + reset_word_dict: true + shuffle: true + trim_eos_bos: true + with_align: true + with_f0: true + with_f0cwt: false + with_linear: false + with_spk_embed: false + with_spk_id: true + +mel_loss_no_noise: False +dec_inp_add_noise: False + +num_test_samples: 200 +mel_loss: l1 +use_pitch_embed: true +use_uv: true +use_energy_embed: false +predictor_grad: 0.01 +test_num: 200 \ No newline at end of file diff --git a/text_to_speech/egs/datasets/audio/wenetspeech/fs2s_end2end.yaml b/text_to_speech/egs/datasets/audio/wenetspeech/fs2s_end2end.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4a0d12abc6912e9ea5f9f5312db13b06f8a850d9 --- /dev/null +++ b/text_to_speech/egs/datasets/audio/wenetspeech/fs2s_end2end.yaml @@ -0,0 +1,32 @@ +base_config: + - egs/egs_bases/tts/fs2s_end2end.yaml + - ./base_mel2wav.yaml +binary_data_dir: 'data/binary/aishell3_fs2s' +binarization_args: + with_wav: true + with_align: true + with_word: true + with_txt: true + reset_phone_dict: true + reset_word_dict: true + shuffle: true + trim_eos_bos: true + with_align: true + with_f0: true + with_f0cwt: false + with_linear: false + with_spk_embed: false + with_spk_id: true + +mel_loss_no_noise: False +dec_inp_add_noise: False + +num_test_samples: 200 +mel_loss: l1 +use_pitch_embed: true +use_uv: true +use_energy_embed: false +predictor_grad: 0.01 +test_num: 200 + +use_wav2vec_loss: False \ No newline at end of file diff --git a/text_to_speech/egs/datasets/audio/wenetspeech/hifigan.yaml b/text_to_speech/egs/datasets/audio/wenetspeech/hifigan.yaml new file mode 100644 index 
0000000000000000000000000000000000000000..65f895904fed74aed9e3a7ff12d49b9713eac399 --- /dev/null +++ b/text_to_speech/egs/datasets/audio/wenetspeech/hifigan.yaml @@ -0,0 +1,9 @@ +base_config: + - egs/egs_bases/tts/vocoder/hifigan.yaml +processed_data_dir: 'data/processed/aishell3' +binary_data_dir: 'data/binary/aishell3_wav' +audio_num_mel_bins: 80 +audio_sample_rate: 16000 +fmin: 0 # Set this to 55 if your speaker is male! if female, 95 should help taking off noise. (To test depending on dataset. Pitch info: male~[65, 260], female~[100, 525]) +fmax: 8000 # To be increased/reduced depending on data. +upsample_rates: [ 8,8,2,2 ] diff --git a/text_to_speech/egs/datasets/audio/wenetspeech/preprocess.py b/text_to_speech/egs/datasets/audio/wenetspeech/preprocess.py new file mode 100644 index 0000000000000000000000000000000000000000..28cc866b2fa1cff9e5d820e7f0936325fe86adad --- /dev/null +++ b/text_to_speech/egs/datasets/audio/wenetspeech/preprocess.py @@ -0,0 +1,33 @@ +import glob +from data_gen.tts.base_preprocess import BasePreprocessor + + +class WenetSpeechPreprocess(BasePreprocessor): + def meta_data(self): + wavfn2text = {} + + def get_wavfn2text(): + d = open(f'{self.raw_data_dir}/extracted_wav/wenetspeech.txt').readlines() + d = [l.strip().split("\t") for l in d if l.strip() != '' and 'podcast' in l] + d = {l[0]: l[1] for l in d} + wavfn2text.update(d) + + get_wavfn2text() + + all_wavs = sorted(wavfn2text.keys()) + + for wav_fn in all_wavs: + wav_basename = wav_fn.split("/")[-2]+"_"+wav_fn.split("/")[-1] + spk_name = 'asr_data' + item_name = f'{spk_name}_{wav_basename}' + yield { + 'item_name': item_name, + 'wav_fn': wav_fn.replace("/home/jzy/dict_idea/NeuralSeq/", ""), + 'txt': wavfn2text[wav_fn], + 'spk_name': spk_name + } + + + +if __name__ == "__main__": + WenetSpeechPreprocess.process() diff --git a/text_to_speech/egs/datasets/audio/wenetspeech/ps_adv.yaml b/text_to_speech/egs/datasets/audio/wenetspeech/ps_adv.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c66412b76a4aa531a3ceded02361c53b05652244 --- /dev/null +++ b/text_to_speech/egs/datasets/audio/wenetspeech/ps_adv.yaml @@ -0,0 +1,3 @@ +base_config: + - egs/egs_bases/tts/ps_adv.yaml + - ./base_text2mel.yaml \ No newline at end of file diff --git a/text_to_speech/egs/datasets/audio/wenetspeech/ps_adv_dict.yaml b/text_to_speech/egs/datasets/audio/wenetspeech/ps_adv_dict.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b474f7ab17aa8bcac1f383d017aa756a70656cef --- /dev/null +++ b/text_to_speech/egs/datasets/audio/wenetspeech/ps_adv_dict.yaml @@ -0,0 +1,20 @@ +base_config: + - egs/egs_bases/tts/ps_adv_dict.yaml + - ./base_text2mel.yaml + +pre_align_args: + txt_processor: zh_dict + +use_perceptual_loss: false +use_APC: false +asr_strides: +- 2 +- 2 +- 1 + +value_embedding_size: 185 +use_word2vec: false +exp_name: '' + +use_dp: false +use_dict: false \ No newline at end of file diff --git a/text_to_speech/egs/datasets/audio/wenetspeech/ps_adv_nlr.yaml b/text_to_speech/egs/datasets/audio/wenetspeech/ps_adv_nlr.yaml new file mode 100644 index 0000000000000000000000000000000000000000..53c7cf36c9a0fb44d40e3ae924a1a05dee4e7960 --- /dev/null +++ b/text_to_speech/egs/datasets/audio/wenetspeech/ps_adv_nlr.yaml @@ -0,0 +1,6 @@ +base_config: + - egs/egs_bases/tts/ps_adv_nlr.yaml + - ./base_text2mel.yaml + +binarizer_cls: data_gen.tts.binarizer_zh_nlr.ZhBinarizer +binary_data_dir: 'data/binary/aishell3_nlr' \ No newline at end of file diff --git 
a/text_to_speech/egs/datasets/audio/wenetspeech/ps_adv_word.yaml b/text_to_speech/egs/datasets/audio/wenetspeech/ps_adv_word.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c71fc9c90826bc7d15563dbb2c633478bcc29f1c --- /dev/null +++ b/text_to_speech/egs/datasets/audio/wenetspeech/ps_adv_word.yaml @@ -0,0 +1,3 @@ +base_config: + - egs/egs_bases/tts/ps_adv_word.yaml + - ./base_text2mel.yaml \ No newline at end of file diff --git a/text_to_speech/egs/datasets/audio/wenetspeech/pwg.yaml b/text_to_speech/egs/datasets/audio/wenetspeech/pwg.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7a3f0d5e99faef6c10e5374c4c2ae42e50ebd2bf --- /dev/null +++ b/text_to_speech/egs/datasets/audio/wenetspeech/pwg.yaml @@ -0,0 +1,6 @@ +base_config: egs/egs_bases/tts/vocoder/pwg.yaml +processed_data_dir: 'data/processed/aishell3' +binary_data_dir: 'data/binary/aishell3_wav' +num_spk: 500 +generator_params: + kernel_size: 5 \ No newline at end of file diff --git a/text_to_speech/egs/datasets/audio/wenetspeech/transformer_ph_recog_ce.yaml b/text_to_speech/egs/datasets/audio/wenetspeech/transformer_ph_recog_ce.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e93d87380255de2e6cc7a56a48cb68f67b412501 --- /dev/null +++ b/text_to_speech/egs/datasets/audio/wenetspeech/transformer_ph_recog_ce.yaml @@ -0,0 +1,6 @@ +base_config: ./conformer_asr.yaml +task_cls: tasks.asr.transformer_asr.TransformerASRTask +max_tokens: 30000 +use_phone: True + +binarizer_cls: data_gen.tts.binarizer_zh.ZhBinarizer \ No newline at end of file diff --git a/text_to_speech/egs/egs_bases/asr/base.yaml b/text_to_speech/egs/egs_bases/asr/base.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d1c53f6ccd07a51c95d9f2e9ec2bec4506237cf7 --- /dev/null +++ b/text_to_speech/egs/egs_bases/asr/base.yaml @@ -0,0 +1,34 @@ +base_config: ./transformer_tts.yaml +pre_align_args: + trim_sil: true +binarization_args: + shuffle: true + with_txt: true + with_wav: false + with_align: false + with_spk_embed: false + with_f0: false + with_f0cwt: false +num_spk: 9999999 +binarizer_cls: data_gen.asr.asr_binarize.ASRBinarizer +strides: [ 2,2,1 ] +enc_ffn_kernel_size: 15 +encoder_hidden_size: 256 + +model_type: trans +# for LSTM decoder +decoder_hidden_size: 512 +decoder_rnn_dim: 512 +p_attention_dropout: 0.05 +p_decoder_dropout: 0.05 +attention_rnn_dim: 512 +attention_dim: 256 +attention_location_n_filters: 16 +attention_location_kernel_size: 15 + +max_frames: 3000 +enc_layers: 8 +dec_layers: 4 +dec_ffn_kernel_size: 1 +max_tokens: 40000 +max_updates: 400000 diff --git a/text_to_speech/egs/egs_bases/asr/transformer_tts.yaml b/text_to_speech/egs/egs_bases/asr/transformer_tts.yaml new file mode 100644 index 0000000000000000000000000000000000000000..35362cecfd26ed74174675c7a9b934db49edf2ac --- /dev/null +++ b/text_to_speech/egs/egs_bases/asr/transformer_tts.yaml @@ -0,0 +1,12 @@ +base_config: ./tts_base.yaml +task_cls: tasks.tts.transformer_tts.TransformerTtsTask +binarization_args: + with_align: true +attn_constraint: true +stop_token_weight: 5.0 +prenet_dropout: 0.5 +prenet_hidden_size: 64 +hidden_size: 384 +warmup_updates: 16000 +lr: 1.0 +teacher_forcing_infer: false \ No newline at end of file diff --git a/text_to_speech/egs/egs_bases/asr/tts_base.yaml b/text_to_speech/egs/egs_bases/asr/tts_base.yaml new file mode 100644 index 0000000000000000000000000000000000000000..bf2b57f4b857d965897fb351976400818a86877f --- /dev/null +++ b/text_to_speech/egs/egs_bases/asr/tts_base.yaml @@ 
-0,0 +1,117 @@ +# task +base_config: ../config_base.yaml +task_cls: '' +############# +# dataset +############# +raw_data_dir: '' +processed_data_dir: '' +binary_data_dir: '' +dict_dir: '' +pre_align_cls: '' +binarizer_cls: data_gen.tts.base_binarizer.BaseBinarizer +mfa_version: 2 +pre_align_args: + nsample_per_mfa_group: 1000 + txt_processor: en + use_tone: true # for ZH + sox_resample: false + sox_to_wav: false + allow_no_txt: false + trim_sil: false + denoise: false +binarization_args: + shuffle: false + with_txt: true + with_wav: false + with_align: true + with_spk_embed: true + with_spk_id: true + with_f0: true + with_f0cwt: false + with_linear: false + with_word: true + trim_eos_bos: false + reset_phone_dict: true + reset_word_dict: true +word_size: 30000 +pitch_extractor: parselmouth + +loud_norm: false +endless_ds: true + +test_num: 100 +min_frames: 0 +max_frames: 1548 +frames_multiple: 1 +max_input_tokens: 1550 +audio_num_mel_bins: 80 +audio_sample_rate: 22050 +hop_size: 256 # For 22050Hz, 275 ~= 12.5 ms (0.0125 * sample_rate) +win_size: 1024 # For 22050Hz, 1100 ~= 50 ms (If None, win_size: fft_size) (0.05 * sample_rate) +fmin: 80 # Set this to 55 if your speaker is male! if female, 95 should help taking off noise. (To test depending on dataset. Pitch info: male~[65, 260], female~[100, 525]) +fmax: 7600 # To be increased/reduced depending on data. +fft_size: 1024 # Extra window size is filled with 0 paddings to match this parameter +min_level_db: -100 +ref_level_db: 20 +griffin_lim_iters: 60 +num_spk: 1 +mel_vmin: -6 +mel_vmax: 1.5 +ds_workers: 1 + +######### +# model +######### +dropout: 0.1 +enc_layers: 4 +dec_layers: 4 +hidden_size: 256 +num_heads: 2 +enc_ffn_kernel_size: 9 +dec_ffn_kernel_size: 9 +ffn_act: gelu +ffn_padding: 'SAME' +use_spk_id: false +use_split_spk_id: false +use_spk_embed: false +mel_loss: l1 + + +########### +# optimization +########### +lr: 2.0 +scheduler: rsqrt # rsqrt|none +warmup_updates: 8000 +optimizer_adam_beta1: 0.9 +optimizer_adam_beta2: 0.98 +weight_decay: 0 +clip_grad_norm: 1 +clip_grad_value: 0 + + +########### +# train and eval +########### +use_word_input: false +max_tokens: 30000 +max_sentences: 100000 +max_valid_sentences: 1 +max_valid_tokens: 60000 +valid_infer_interval: 10000 +train_set_name: 'train' +train_sets: '' +valid_set_name: 'valid' +test_set_name: 'test' +num_test_samples: 0 +num_valid_plots: 10 +test_ids: [ ] +vocoder: pwg +vocoder_ckpt: '' +vocoder_denoise_c: 0.0 +profile_infer: false +out_wav_norm: false +save_gt: true +save_f0: false +gen_dir_name: '' \ No newline at end of file diff --git a/text_to_speech/egs/egs_bases/config_base.yaml b/text_to_speech/egs/egs_bases/config_base.yaml new file mode 100644 index 0000000000000000000000000000000000000000..504b21063de4d76cfc9117281e93bd93b97efa05 --- /dev/null +++ b/text_to_speech/egs/egs_bases/config_base.yaml @@ -0,0 +1,41 @@ +# task +binary_data_dir: '' +work_dir: '' # experiment directory. 
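+# NOTE: this file is the root config that the other egs_bases/* and egs/datasets/*
+# YAMLs inherit (directly or via egs_bases/tts/base.yaml) through `base_config`;
+# the assumed merge rule is that parent files are loaded first and any key
+# redefined in a child overrides the inherited value, e.g. egs_bases/tts/base.yaml
+# pulls in this file plus dataset_params.yaml and overrides
+# max_valid_tokens / max_valid_sentences.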
+infer: false # infer +amp: false +seed: 1234 +debug: false +save_codes: ['tasks', 'modules', 'egs', 'research'] + +############# +# dataset +############# +ds_workers: 0 +test_num: 100 +endless_ds: true +sort_by_len: true + +######### +# train and eval +######### +print_nan_grads: false +load_ckpt: '' +save_best: false +num_ckpt_keep: 1 +clip_grad_norm: 0 +accumulate_grad_batches: 1 +tb_log_interval: 100 +num_sanity_val_steps: 5 # steps of validation at the beginning +check_val_every_n_epoch: 10 +val_check_interval: 2000 +valid_monitor_key: 'val_loss' +valid_monitor_mode: 'min' +max_epochs: 1000 +max_updates: 1000000 +max_tokens: 40000 +max_sentences: 100000 +max_valid_tokens: -1 +max_valid_sentences: -1 +eval_max_batches: -1 +resume_from_checkpoint: 0 +rename_tmux: true \ No newline at end of file diff --git a/text_to_speech/egs/egs_bases/tts/base.yaml b/text_to_speech/egs/egs_bases/tts/base.yaml new file mode 100644 index 0000000000000000000000000000000000000000..bd29bea1a62c19650c9ca455fb4145c9919c091b --- /dev/null +++ b/text_to_speech/egs/egs_bases/tts/base.yaml @@ -0,0 +1,56 @@ +# task +base_config: + - ../config_base.yaml + - ./dataset_params.yaml + +############# +# dataset in training +############# +endless_ds: true +min_frames: 0 +max_frames: 1548 +frames_multiple: 1 +max_input_tokens: 1550 +ds_workers: 0 + +######### +# model +######### +use_spk_id: false +use_spk_embed: false +mel_losses: "ssim:0.5|l1:0.5" + +########### +# optimization +########### +lr: 0.0005 +scheduler: warmup # rsqrt|warmup|none +warmup_updates: 4000 +optimizer_adam_beta1: 0.9 +optimizer_adam_beta2: 0.98 +weight_decay: 0 +clip_grad_norm: 1 +clip_grad_value: 0 + + +########### +# train and eval +########### +use_word_input: false +max_valid_sentences: 1 +max_valid_tokens: 60000 +valid_infer_interval: 10000 +train_set_name: 'train' +train_sets: '' +valid_set_name: 'valid' +test_set_name: 'test' +num_valid_plots: 10 +test_ids: [ ] +test_input_yaml: '' +vocoder: HifiGAN +vocoder_ckpt: '' +profile_infer: false +out_wav_norm: false +save_gt: true +save_f0: false +gen_dir_name: '' \ No newline at end of file diff --git a/text_to_speech/egs/egs_bases/tts/base_zh.yaml b/text_to_speech/egs/egs_bases/tts/base_zh.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6e6400eebc67900e3bf7e71e4575762a6b5fdc3e --- /dev/null +++ b/text_to_speech/egs/egs_bases/tts/base_zh.yaml @@ -0,0 +1,6 @@ +base_config: ./base.yaml +preprocess_args: + txt_processor: zh + use_tone: true + +word_size: 3000 \ No newline at end of file diff --git a/text_to_speech/egs/egs_bases/tts/dataset_params.yaml b/text_to_speech/egs/egs_bases/tts/dataset_params.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4d98d3e83d93eb5532f93a7a0ba35c3f81ee301a --- /dev/null +++ b/text_to_speech/egs/egs_bases/tts/dataset_params.yaml @@ -0,0 +1,52 @@ +audio_num_mel_bins: 80 +audio_sample_rate: 22050 +hop_size: 256 # For 22050Hz, 275 ~= 12.5 ms (0.0125 * sample_rate) +win_size: 1024 # For 22050Hz, 1100 ~= 50 ms (If None, win_size: fft_size) (0.05 * sample_rate) +fft_size: 1024 # Extra window size is filled with 0 paddings to match this parameter +fmin: 80 # Set this to 55 if your speaker is male! if female, 95 should help taking off noise. (To test depending on dataset. Pitch info: male~[65, 260], female~[100, 525]) +fmax: 7600 # To be increased/reduced depending on data. 
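+# NOTE: fmin/fmax above bound the mel filterbank, whereas f0_min/f0_max below
+# are assumed to be the pitch search range (in Hz) handed to the pitch
+# extractor configured by `pitch_extractor: parselmouth`.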
+f0_min: 80 +f0_max: 800 +griffin_lim_iters: 30 +pitch_extractor: parselmouth +num_spk: 1 +mel_vmin: -6 +mel_vmax: 1.5 +loud_norm: false + +raw_data_dir: '' +processed_data_dir: '' +binary_data_dir: '' +preprocess_cls: '' +binarizer_cls: data_gen.tts.base_binarizer.BaseBinarizer +preprocess_args: + nsample_per_mfa_group: 1000 + # text process + txt_processor: en + use_mfa: true + with_phsep: true + reset_phone_dict: true + reset_word_dict: true + add_eos_bos: true + # mfa + mfa_group_shuffle: false + mfa_offset: 0.02 + # wav processors + wav_processors: [ ] + save_sil_mask: true + vad_max_silence_length: 12 +binarization_args: + shuffle: false + with_wav: false + with_align: true + with_spk_embed: false + with_f0: true + with_f0cwt: false + with_linear: false + trim_eos_bos: false + min_sil_duration: 0.1 + train_range: [ 200, -1 ] + test_range: [ 0, 100 ] + valid_range: [ 100, 200 ] +word_dict_size: 10000 +pitch_key: pitch \ No newline at end of file diff --git a/text_to_speech/egs/egs_bases/tts/ds.yaml b/text_to_speech/egs/egs_bases/tts/ds.yaml new file mode 100644 index 0000000000000000000000000000000000000000..85f606a96c9efec2634542313ca7eea6b16ad239 --- /dev/null +++ b/text_to_speech/egs/egs_bases/tts/ds.yaml @@ -0,0 +1,33 @@ +base_config: ./fs2_orig.yaml + +# special configs for diffspeech +task_cls: tasks.tts.diffspeech.DiffSpeechTask +lr: 0.001 +timesteps: 100 +K_step: 71 +diff_loss_type: l1 +diff_decoder_type: 'wavenet' +schedule_type: 'linear' +max_beta: 0.06 + +## model configs for diffspeech +dilation_cycle_length: 1 +residual_layers: 20 +residual_channels: 256 +decay_steps: 50000 +keep_bins: 80 +#content_cond_steps: [ ] # [ 0, 10000 ] +#spk_cond_steps: [ ] # [ 0, 10000 ] +#gen_tgt_spk_id: -1 + + + +# training configs for diffspeech +#max_sentences: 48 +#num_sanity_val_steps: 1 +num_valid_plots: 10 +use_gt_dur: false +use_gt_f0: false +use_energy_embed: false +#pitch_type: cwt +max_updates: 160000 \ No newline at end of file diff --git a/text_to_speech/egs/egs_bases/tts/fs.yaml b/text_to_speech/egs/egs_bases/tts/fs.yaml new file mode 100644 index 0000000000000000000000000000000000000000..025d3079b82a0f03397a5b59040a97d82c816f2d --- /dev/null +++ b/text_to_speech/egs/egs_bases/tts/fs.yaml @@ -0,0 +1,75 @@ +base_config: ./base.yaml +task_cls: tasks.tts.fs.FastSpeechTask + +# model +hidden_size: 256 +dropout: 0.0 +encoder_type: rel_fft # rel_fft|fft|tacotron|tacotron2|conformer +decoder_type: conv # fft|rnn|conv|conformer|wn + +# rnn enc/dec +encoder_K: 8 +decoder_rnn_dim: 0 # for rnn decoder, 0 -> hidden_size * 2 + +# fft enc/dec +enc_layers: 4 +enc_ffn_kernel_size: 9 +enc_prenet: true +enc_pre_ln: true +dec_layers: 4 +dec_ffn_kernel_size: 9 +num_heads: 2 +ffn_act: gelu +ffn_hidden_size: 1024 +use_pos_embed: true + +# conv enc/dec +enc_dec_norm: ln +conv_use_pos: false +layers_in_block: 2 +enc_dilations: [ 1, 1, 1, 1 ] +enc_kernel_size: 5 +enc_post_net_kernel: 3 +dec_dilations: [ 1, 1, 1, 1 ] # for conv decoder +dec_kernel_size: 5 +dec_post_net_kernel: 3 + +# duration +predictor_hidden: -1 +dur_predictor_kernel: 3 +dur_predictor_layers: 2 +predictor_kernel: 5 +predictor_layers: 5 +predictor_dropout: 0.5 + +# pitch and energy +use_pitch_embed: false +pitch_type: frame # frame|ph|cwt +use_uv: true + +# reference encoder and speaker embedding +lambda_commit: 0.25 +ref_norm_layer: bn +dec_inp_add_noise: false + +# mel +mel_losses: l1:0.5|ssim:0.5 # l1|l2|gdl|ssim or l1:0.5|ssim:0.5 + +# loss lambda +lambda_f0: 1.0 +lambda_uv: 1.0 +lambda_energy: 0.1 +lambda_ph_dur: 0.1 
+lambda_sent_dur: 1.0 +lambda_word_dur: 1.0 +predictor_grad: 0.1 + +# train and eval +warmup_updates: 4000 +max_tokens: 40000 +max_sentences: 128 +max_valid_sentences: 1 +max_updates: 160000 +use_gt_dur: false +use_gt_f0: false +ds_workers: 0 diff --git a/text_to_speech/egs/egs_bases/tts/fs2_orig.yaml b/text_to_speech/egs/egs_bases/tts/fs2_orig.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0a003331f1e45a342ac16175b7dde97bb1d6db03 --- /dev/null +++ b/text_to_speech/egs/egs_bases/tts/fs2_orig.yaml @@ -0,0 +1,13 @@ +base_config: ./fs.yaml +task_cls: tasks.tts.fs2_orig.FastSpeech2OrigTask +encoder_type: fft +decoder_type: fft +use_energy_embed: false +use_pitch_embed: true +pitch_type: cwt # frame|ph|cwt +binarization_args: + with_f0cwt: true +use_gt_energy: false +cwt_std_scale: 0.8 +dropout: 0.1 +mel_losses: l1 \ No newline at end of file diff --git a/text_to_speech/egs/egs_bases/tts/fs_adv.yaml b/text_to_speech/egs/egs_bases/tts/fs_adv.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6b0bc078fd25fbaca222df97f65445f64b842d30 --- /dev/null +++ b/text_to_speech/egs/egs_bases/tts/fs_adv.yaml @@ -0,0 +1,31 @@ +base_config: ./fs.yaml +task_cls: tasks.tts.fs_adv.FastSpeechAdvTask + +lambda_mel_adv: 0.05 + +disc_win_num: 3 +mel_disc_hidden_size: 128 +disc_norm: in +disc_reduction: stack +disc_interval: 1 +disc_lr: 0.0001 +disc_start_steps: 0 +discriminator_scheduler_params: + gamma: 0.5 + step_size: 40000 +discriminator_optimizer_params: + eps: 1.0e-06 + weight_decay: 0.0 + +scheduler: rsqrt +lr: 0.4 # edit from 1. + +# copied from graph-porta-speech repo +predictor_grad: 0.02 + +max_tokens: 30000 +num_sanity_val_steps: 0 # steps of validation at the beginning +ds_workers: 1 + +use_pitch_embed: false # Attention! no pitch predictor! 
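+# NOTE (assumption): since use_pitch_embed is false here, pitch_type below is
+# expected to matter only for how f0 is extracted/stored during binarization,
+# not for the acoustic model itself.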
+pitch_type: cwt # frame|ph|cwt diff --git a/text_to_speech/egs/egs_bases/tts/ps.yaml b/text_to_speech/egs/egs_bases/tts/ps.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e749faf5d159f44c4a0061db075361f704cc5a4b --- /dev/null +++ b/text_to_speech/egs/egs_bases/tts/ps.yaml @@ -0,0 +1,63 @@ +base_config: ./fs.yaml + +########################### +# models +########################### +# encoders +hidden_size: 192 +ffn_hidden_size: 768 +enc_ffn_kernel_size: 5 +enc_layers: 4 +dur_level: word +encoder_type: rel_fft +use_word_encoder: true + +# mix ling encoder +word_enc_layers: 4 +word_encoder_type: rel_fft +use_pitch_embed: false +enc_prenet: true +enc_pre_ln: true +text_encoder_postnet: true +dropout: 0.0 +add_word_pos: true + +# dur predictor +dur_predictor_layers: 3 +dur_predictor_kernel: 5 +predictor_dropout: 0.2 + +## fvae +use_fvae: true +latent_size: 16 +fvae_encoder_type: conv +fvae_decoder_type: conv +fvae_enc_dec_hidden: 192 +fvae_kernel_size: 5 +fvae_enc_n_layers: 8 +fvae_dec_n_layers: 4 +fvae_strides: 4 +fvae_noise_scale: 1.0 + +# prior flow +use_prior_flow: true +prior_flow_hidden: 64 +prior_flow_kernel_size: 3 +prior_flow_n_blocks: 4 + +########################### +# training and inference +########################### +lambda_kl: 1.0 +kl_min: 0.0 +lambda_sent_dur: 0.0 +kl_start_steps: 10000 +posterior_start_steps: 0 +frames_multiple: 4 +num_valid_plots: 10 +lr: 0.0002 +warmup_updates: 8000 +max_tokens: 40000 +valid_infer_interval: 10000 +max_sentences: 80 +max_updates: 480000 \ No newline at end of file diff --git a/text_to_speech/egs/egs_bases/tts/ps_flow.yaml b/text_to_speech/egs/egs_bases/tts/ps_flow.yaml new file mode 100644 index 0000000000000000000000000000000000000000..56a4326101aa0d89046687cc8cbdcb2f06c6d2d8 --- /dev/null +++ b/text_to_speech/egs/egs_bases/tts/ps_flow.yaml @@ -0,0 +1,20 @@ +base_config: ./ps.yaml +task_cls: tasks.tts.ps_flow.PortaSpeechFlowTask + +use_post_flow: true +detach_postflow_input: true +post_flow_lr: 0.001 +post_glow_hidden: 192 +post_glow_kernel_size: 3 +post_glow_n_blocks: 12 +post_glow_n_block_layers: 3 +post_share_cond_layers: false +share_wn_layers: 4 +use_cond_proj: false +use_latent_cond: false +use_txt_cond: true +sigmoid_scale: false +post_glow_training_start: 160000 +noise_scale: 0.8 +infer_post_glow: true +two_stage: true \ No newline at end of file diff --git a/text_to_speech/egs/egs_bases/tts/ps_flow_small.yaml b/text_to_speech/egs/egs_bases/tts/ps_flow_small.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e95a86492ee9c6516a05201bf4d0a2de6e55e947 --- /dev/null +++ b/text_to_speech/egs/egs_bases/tts/ps_flow_small.yaml @@ -0,0 +1,42 @@ +base_config: ./ps_flow.yaml + +########################### +# models +########################### +# encoders +hidden_size: 128 +ffn_hidden_size: 512 +enc_ffn_kernel_size: 3 +enc_layers: 3 +word_enc_layers: 3 + +# dur predictor +dur_predictor_layers: 3 +dur_predictor_kernel: 5 +predictor_dropout: 0.2 + +## fvae +use_fvae: true +latent_size: 16 +fvae_encoder_type: wn +fvae_decoder_type: wn +fvae_enc_dec_hidden: 128 +fvae_kernel_size: 3 +fvae_enc_n_layers: 8 +fvae_dec_n_layers: 3 +fvae_strides: 4 +fvae_noise_scale: 1.0 + + +# prior flow +use_prior_flow: true +prior_flow_hidden: 32 +prior_flow_kernel_size: 3 +prior_flow_n_blocks: 3 +# post flow +post_glow_hidden: 128 +post_glow_kernel_size: 3 +post_glow_n_blocks: 8 +post_glow_n_block_layers: 3 +share_wn_layers: 4 +noise_scale: 0.6 \ No newline at end of file diff --git 
a/text_to_speech/egs/egs_bases/tts/vocoder/base.yaml b/text_to_speech/egs/egs_bases/tts/vocoder/base.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d7adee14286b0c2377ce88670b1dab787ea8a2c9 --- /dev/null +++ b/text_to_speech/egs/egs_bases/tts/vocoder/base.yaml @@ -0,0 +1,20 @@ +base_config: + - egs/egs_bases/config_base.yaml + - ../dataset_params.yaml +binarization_args: + with_wav: true + with_spk_embed: false + with_align: false + +generator_grad_norm: 10.0 # Generator's gradient norm. +discriminator_grad_norm: 1.0 # Discriminator's gradient norm. + +########### +# train and eval +########### +max_samples: 20480 +max_sentences: 8 +max_valid_sentences: 1 +max_updates: 2000000 +val_check_interval: 5000 +valid_infer_interval: 50000 diff --git a/text_to_speech/egs/egs_bases/tts/vocoder/hifigan.yaml b/text_to_speech/egs/egs_bases/tts/vocoder/hifigan.yaml new file mode 100644 index 0000000000000000000000000000000000000000..fb77ba5104bf9d5b7ce9282f626bf56e957fbfe8 --- /dev/null +++ b/text_to_speech/egs/egs_bases/tts/vocoder/hifigan.yaml @@ -0,0 +1,28 @@ +base_config: ./base.yaml +task_cls: tasks.vocoder.hifigan.HifiGanTask +resblock: "1" +adam_b1: 0.8 +adam_b2: 0.99 +upsample_rates: [ 8,8,2,2 ] +upsample_kernel_sizes: [ 16,16,4,4 ] +upsample_initial_channel: 512 +resblock_kernel_sizes: [ 3,7,11 ] +resblock_dilation_sizes: [ [ 1,3,5 ], [ 1,3,5 ], [ 1,3,5 ] ] + +use_pitch_embed: false +use_fm_loss: false +use_ms_stft: false + +lambda_mel: 5.0 +lambda_mel_adv: 1.0 +lambda_cdisc: 4.0 +lambda_adv: 1.0 + +lr: 0.0002 # Generator's learning rate. +generator_scheduler_params: + step_size: 600 + gamma: 0.999 +discriminator_scheduler_params: + step_size: 600 + gamma: 0.999 +max_updates: 3000000 \ No newline at end of file diff --git a/text_to_speech/modules/commons/__pycache__/conv.cpython-38.pyc b/text_to_speech/modules/commons/__pycache__/conv.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7fd7e3d9195fdf03a1eb89613f0ccf99af1ab8e6 Binary files /dev/null and b/text_to_speech/modules/commons/__pycache__/conv.cpython-38.pyc differ diff --git a/text_to_speech/modules/commons/__pycache__/layers.cpython-38.pyc b/text_to_speech/modules/commons/__pycache__/layers.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..92d414b0f18ffb8d469c41b05e82c0396064d3f7 Binary files /dev/null and b/text_to_speech/modules/commons/__pycache__/layers.cpython-38.pyc differ diff --git a/text_to_speech/modules/commons/__pycache__/nar_tts_modules.cpython-38.pyc b/text_to_speech/modules/commons/__pycache__/nar_tts_modules.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2f13acda46ead052da94194968d4c60f35bffcb5 Binary files /dev/null and b/text_to_speech/modules/commons/__pycache__/nar_tts_modules.cpython-38.pyc differ diff --git a/text_to_speech/modules/commons/__pycache__/rel_transformer.cpython-38.pyc b/text_to_speech/modules/commons/__pycache__/rel_transformer.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a1ca0455d7737e6d57f98a3a5dce71440fc40c8f Binary files /dev/null and b/text_to_speech/modules/commons/__pycache__/rel_transformer.cpython-38.pyc differ diff --git a/text_to_speech/modules/commons/__pycache__/rnn.cpython-38.pyc b/text_to_speech/modules/commons/__pycache__/rnn.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..11d9b38fc38845bfec70bb6c5654c2032344b39e Binary files /dev/null and 
b/text_to_speech/modules/commons/__pycache__/rnn.cpython-38.pyc differ diff --git a/text_to_speech/modules/commons/__pycache__/transformer.cpython-38.pyc b/text_to_speech/modules/commons/__pycache__/transformer.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1a9aff1598b88f75dd08696110324276190ff75f Binary files /dev/null and b/text_to_speech/modules/commons/__pycache__/transformer.cpython-38.pyc differ diff --git a/text_to_speech/modules/commons/__pycache__/wavenet.cpython-38.pyc b/text_to_speech/modules/commons/__pycache__/wavenet.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6f94d39a76f516db28d02dd2d021698923d7e8bc Binary files /dev/null and b/text_to_speech/modules/commons/__pycache__/wavenet.cpython-38.pyc differ diff --git a/text_to_speech/modules/commons/conformer/conformer.py b/text_to_speech/modules/commons/conformer/conformer.py new file mode 100644 index 0000000000000000000000000000000000000000..21e1ecdda7ec069864d3904abb4360ec5aee637e --- /dev/null +++ b/text_to_speech/modules/commons/conformer/conformer.py @@ -0,0 +1,72 @@ +from torch import nn +from .espnet_positional_embedding import RelPositionalEncoding +from .espnet_transformer_attn import RelPositionMultiHeadedAttention +from .layers import Swish, ConvolutionModule, EncoderLayer, MultiLayeredConv1d +from ..layers import Embedding + + +class ConformerLayers(nn.Module): + def __init__(self, hidden_size, num_layers, kernel_size=9, dropout=0.0, num_heads=4, + use_last_norm=True, save_hidden=False): + super().__init__() + self.use_last_norm = use_last_norm + self.layers = nn.ModuleList() + positionwise_layer = MultiLayeredConv1d + positionwise_layer_args = (hidden_size, hidden_size * 4, 1, dropout) + self.pos_embed = RelPositionalEncoding(hidden_size, dropout) + self.encoder_layers = nn.ModuleList([EncoderLayer( + hidden_size, + RelPositionMultiHeadedAttention(num_heads, hidden_size, 0.0), + positionwise_layer(*positionwise_layer_args), + positionwise_layer(*positionwise_layer_args), + ConvolutionModule(hidden_size, kernel_size, Swish()), + dropout, + ) for _ in range(num_layers)]) + if self.use_last_norm: + self.layer_norm = nn.LayerNorm(hidden_size) + else: + self.layer_norm = nn.Linear(hidden_size, hidden_size) + self.save_hidden = save_hidden + if save_hidden: + self.hiddens = [] + + def forward(self, x, padding_mask=None): + """ + + :param x: [B, T, H] + :param padding_mask: [B, T] + :return: [B, T, H] + """ + self.hiddens = [] + nonpadding_mask = x.abs().sum(-1) > 0 + x = self.pos_embed(x) + for l in self.encoder_layers: + x, mask = l(x, nonpadding_mask[:, None, :]) + if self.save_hidden: + self.hiddens.append(x[0]) + x = x[0] + x = self.layer_norm(x) * nonpadding_mask.float()[:, :, None] + return x + + +class ConformerEncoder(ConformerLayers): + def __init__(self, hidden_size, dict_size, num_layers=None): + conformer_enc_kernel_size = 9 + super().__init__(hidden_size, num_layers, conformer_enc_kernel_size) + self.embed = Embedding(dict_size, hidden_size, padding_idx=0) + + def forward(self, x): + """ + + :param src_tokens: [B, T] + :return: [B x T x C] + """ + x = self.embed(x) # [B, T, H] + x = super(ConformerEncoder, self).forward(x) + return x + + +class ConformerDecoder(ConformerLayers): + def __init__(self, hidden_size, num_layers): + conformer_dec_kernel_size = 9 + super().__init__(hidden_size, num_layers, conformer_dec_kernel_size) diff --git a/text_to_speech/modules/commons/conformer/espnet_positional_embedding.py 
b/text_to_speech/modules/commons/conformer/espnet_positional_embedding.py new file mode 100644 index 0000000000000000000000000000000000000000..89b9b5549cc779d1ea67f052b1c99cad92365503 --- /dev/null +++ b/text_to_speech/modules/commons/conformer/espnet_positional_embedding.py @@ -0,0 +1,113 @@ +import math +import torch + + +class PositionalEncoding(torch.nn.Module): + """Positional encoding. + Args: + d_model (int): Embedding dimension. + dropout_rate (float): Dropout rate. + max_len (int): Maximum input length. + reverse (bool): Whether to reverse the input position. + """ + + def __init__(self, d_model, dropout_rate, max_len=5000, reverse=False): + """Construct an PositionalEncoding object.""" + super(PositionalEncoding, self).__init__() + self.d_model = d_model + self.reverse = reverse + self.xscale = math.sqrt(self.d_model) + self.dropout = torch.nn.Dropout(p=dropout_rate) + self.pe = None + self.extend_pe(torch.tensor(0.0).expand(1, max_len)) + + def extend_pe(self, x): + """Reset the positional encodings.""" + if self.pe is not None: + if self.pe.size(1) >= x.size(1): + if self.pe.dtype != x.dtype or self.pe.device != x.device: + self.pe = self.pe.to(dtype=x.dtype, device=x.device) + return + pe = torch.zeros(x.size(1), self.d_model) + if self.reverse: + position = torch.arange( + x.size(1) - 1, -1, -1.0, dtype=torch.float32 + ).unsqueeze(1) + else: + position = torch.arange(0, x.size(1), dtype=torch.float32).unsqueeze(1) + div_term = torch.exp( + torch.arange(0, self.d_model, 2, dtype=torch.float32) + * -(math.log(10000.0) / self.d_model) + ) + pe[:, 0::2] = torch.sin(position * div_term) + pe[:, 1::2] = torch.cos(position * div_term) + pe = pe.unsqueeze(0) + self.pe = pe.to(device=x.device, dtype=x.dtype) + + def forward(self, x: torch.Tensor): + """Add positional encoding. + Args: + x (torch.Tensor): Input tensor (batch, time, `*`). + Returns: + torch.Tensor: Encoded tensor (batch, time, `*`). + """ + self.extend_pe(x) + x = x * self.xscale + self.pe[:, : x.size(1)] + return self.dropout(x) + + +class ScaledPositionalEncoding(PositionalEncoding): + """Scaled positional encoding module. + See Sec. 3.2 https://arxiv.org/abs/1809.08895 + Args: + d_model (int): Embedding dimension. + dropout_rate (float): Dropout rate. + max_len (int): Maximum input length. + """ + + def __init__(self, d_model, dropout_rate, max_len=5000): + """Initialize class.""" + super().__init__(d_model=d_model, dropout_rate=dropout_rate, max_len=max_len) + self.alpha = torch.nn.Parameter(torch.tensor(1.0)) + + def reset_parameters(self): + """Reset parameters.""" + self.alpha.data = torch.tensor(1.0) + + def forward(self, x): + """Add positional encoding. + Args: + x (torch.Tensor): Input tensor (batch, time, `*`). + Returns: + torch.Tensor: Encoded tensor (batch, time, `*`). + """ + self.extend_pe(x) + x = x + self.alpha * self.pe[:, : x.size(1)] + return self.dropout(x) + + +class RelPositionalEncoding(PositionalEncoding): + """Relative positional encoding module. + See : Appendix B in https://arxiv.org/abs/1901.02860 + Args: + d_model (int): Embedding dimension. + dropout_rate (float): Dropout rate. + max_len (int): Maximum input length. + """ + + def __init__(self, d_model, dropout_rate, max_len=5000): + """Initialize class.""" + super().__init__(d_model, dropout_rate, max_len, reverse=True) + + def forward(self, x): + """Compute positional encoding. + Args: + x (torch.Tensor): Input tensor (batch, time, `*`). + Returns: + torch.Tensor: Encoded tensor (batch, time, `*`). 
+ torch.Tensor: Positional embedding tensor (1, time, `*`). + """ + self.extend_pe(x) + x = x * self.xscale + pos_emb = self.pe[:, : x.size(1)] + return self.dropout(x), self.dropout(pos_emb) \ No newline at end of file diff --git a/text_to_speech/modules/commons/conformer/espnet_transformer_attn.py b/text_to_speech/modules/commons/conformer/espnet_transformer_attn.py new file mode 100644 index 0000000000000000000000000000000000000000..a479a27ea6fd4202359da435234408ba074f7577 --- /dev/null +++ b/text_to_speech/modules/commons/conformer/espnet_transformer_attn.py @@ -0,0 +1,186 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +# Copyright 2019 Shigeki Karita +# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0) + +"""Multi-Head Attention layer definition.""" + +import math + +import numpy +import torch +from torch import nn + + +class MultiHeadedAttention(nn.Module): + """Multi-Head Attention layer. + Args: + n_head (int): The number of heads. + n_feat (int): The number of features. + dropout_rate (float): Dropout rate. + """ + + def __init__(self, n_head, n_feat, dropout_rate): + """Construct an MultiHeadedAttention object.""" + super(MultiHeadedAttention, self).__init__() + assert n_feat % n_head == 0 + # We assume d_v always equals d_k + self.d_k = n_feat // n_head + self.h = n_head + self.linear_q = nn.Linear(n_feat, n_feat) + self.linear_k = nn.Linear(n_feat, n_feat) + self.linear_v = nn.Linear(n_feat, n_feat) + self.linear_out = nn.Linear(n_feat, n_feat) + self.attn = None + self.dropout = nn.Dropout(p=dropout_rate) + + def forward_qkv(self, query, key, value): + """Transform query, key and value. + Args: + query (torch.Tensor): Query tensor (#batch, time1, size). + key (torch.Tensor): Key tensor (#batch, time2, size). + value (torch.Tensor): Value tensor (#batch, time2, size). + Returns: + torch.Tensor: Transformed query tensor (#batch, n_head, time1, d_k). + torch.Tensor: Transformed key tensor (#batch, n_head, time2, d_k). + torch.Tensor: Transformed value tensor (#batch, n_head, time2, d_k). + """ + n_batch = query.size(0) + q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k) + k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k) + v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k) + q = q.transpose(1, 2) # (batch, head, time1, d_k) + k = k.transpose(1, 2) # (batch, head, time2, d_k) + v = v.transpose(1, 2) # (batch, head, time2, d_k) + + return q, k, v + + def forward_attention(self, value, scores, mask): + """Compute attention context vector. + Args: + value (torch.Tensor): Transformed value (#batch, n_head, time2, d_k). + scores (torch.Tensor): Attention score (#batch, n_head, time1, time2). + mask (torch.Tensor): Mask (#batch, 1, time2) or (#batch, time1, time2). + Returns: + torch.Tensor: Transformed value (#batch, time1, d_model) + weighted by the attention score (#batch, time1, time2). 
+ """ + n_batch = value.size(0) + if mask is not None: + mask = mask.unsqueeze(1).eq(0) # (batch, 1, *, time2) + min_value = float( + numpy.finfo(torch.tensor(0, dtype=scores.dtype).numpy().dtype).min + ) + scores = scores.masked_fill(mask, min_value) + self.attn = torch.softmax(scores, dim=-1).masked_fill( + mask, 0.0 + ) # (batch, head, time1, time2) + else: + self.attn = torch.softmax(scores, dim=-1) # (batch, head, time1, time2) + + p_attn = self.dropout(self.attn) + x = torch.matmul(p_attn, value) # (batch, head, time1, d_k) + x = ( + x.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k) + ) # (batch, time1, d_model) + + return self.linear_out(x) # (batch, time1, d_model) + + def forward(self, query, key, value, mask): + """Compute scaled dot product attention. + Args: + query (torch.Tensor): Query tensor (#batch, time1, size). + key (torch.Tensor): Key tensor (#batch, time2, size). + value (torch.Tensor): Value tensor (#batch, time2, size). + mask (torch.Tensor): Mask tensor (#batch, 1, time2) or + (#batch, time1, time2). + Returns: + torch.Tensor: Output tensor (#batch, time1, d_model). + """ + q, k, v = self.forward_qkv(query, key, value) + scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k) + return self.forward_attention(v, scores, mask) + + +class RelPositionMultiHeadedAttention(MultiHeadedAttention): + """Multi-Head Attention layer with relative position encoding. + Paper: https://arxiv.org/abs/1901.02860 + Args: + n_head (int): The number of heads. + n_feat (int): The number of features. + dropout_rate (float): Dropout rate. + """ + + def __init__(self, n_head, n_feat, dropout_rate): + """Construct an RelPositionMultiHeadedAttention object.""" + super().__init__(n_head, n_feat, dropout_rate) + # linear transformation for positional ecoding + self.linear_pos = nn.Linear(n_feat, n_feat, bias=False) + # these two learnable bias are used in matrix c and matrix d + # as described in https://arxiv.org/abs/1901.02860 Section 3.3 + self.pos_bias_u = nn.Parameter(torch.Tensor(self.h, self.d_k)) + self.pos_bias_v = nn.Parameter(torch.Tensor(self.h, self.d_k)) + torch.nn.init.xavier_uniform_(self.pos_bias_u) + torch.nn.init.xavier_uniform_(self.pos_bias_v) + + def rel_shift(self, x, zero_triu=False): + """Compute relative positinal encoding. + Args: + x (torch.Tensor): Input tensor (batch, time, size). + zero_triu (bool): If true, return the lower triangular part of the matrix. + Returns: + torch.Tensor: Output tensor. + """ + zero_pad = torch.zeros((*x.size()[:3], 1), device=x.device, dtype=x.dtype) + x_padded = torch.cat([zero_pad, x], dim=-1) + + x_padded = x_padded.view(*x.size()[:2], x.size(3) + 1, x.size(2)) + x = x_padded[:, :, 1:].view_as(x) + + if zero_triu: + ones = torch.ones((x.size(2), x.size(3))) + x = x * torch.tril(ones, x.size(3) - x.size(2))[None, None, :, :] + + return x + + def forward(self, query, key, value, pos_emb, mask): + """Compute 'Scaled Dot Product Attention' with rel. positional encoding. + Args: + query (torch.Tensor): Query tensor (#batch, time1, size). + key (torch.Tensor): Key tensor (#batch, time2, size). + value (torch.Tensor): Value tensor (#batch, time2, size). + pos_emb (torch.Tensor): Positional embedding tensor (#batch, time2, size). + mask (torch.Tensor): Mask tensor (#batch, 1, time2) or + (#batch, time1, time2). + Returns: + torch.Tensor: Output tensor (#batch, time1, d_model). 
+ """ + q, k, v = self.forward_qkv(query, key, value) + q = q.transpose(1, 2) # (batch, time1, head, d_k) + + n_batch_pos = pos_emb.size(0) + p = self.linear_pos(pos_emb).view(n_batch_pos, -1, self.h, self.d_k) + p = p.transpose(1, 2) # (batch, head, time1, d_k) + + # (batch, head, time1, d_k) + q_with_bias_u = (q + self.pos_bias_u).transpose(1, 2) + # (batch, head, time1, d_k) + q_with_bias_v = (q + self.pos_bias_v).transpose(1, 2) + + # compute attention score + # first compute matrix a and matrix c + # as described in https://arxiv.org/abs/1901.02860 Section 3.3 + # (batch, head, time1, time2) + matrix_ac = torch.matmul(q_with_bias_u, k.transpose(-2, -1)) + + # compute matrix b and matrix d + # (batch, head, time1, time2) + matrix_bd = torch.matmul(q_with_bias_v, p.transpose(-2, -1)) + matrix_bd = self.rel_shift(matrix_bd) + + scores = (matrix_ac + matrix_bd) / math.sqrt( + self.d_k + ) # (batch, head, time1, time2) + + return self.forward_attention(v, scores, mask) diff --git a/text_to_speech/modules/commons/conformer/layers.py b/text_to_speech/modules/commons/conformer/layers.py new file mode 100644 index 0000000000000000000000000000000000000000..65b6414128dc9499940c1dcfd38ab27f119152cd --- /dev/null +++ b/text_to_speech/modules/commons/conformer/layers.py @@ -0,0 +1,260 @@ +from torch import nn +import torch + +from text_to_speech.modules.commons.layers import LayerNorm + + +class ConvolutionModule(nn.Module): + """ConvolutionModule in Conformer model. + Args: + channels (int): The number of channels of conv layers. + kernel_size (int): Kernerl size of conv layers. + """ + + def __init__(self, channels, kernel_size, activation=nn.ReLU(), bias=True): + """Construct an ConvolutionModule object.""" + super(ConvolutionModule, self).__init__() + # kernerl_size should be a odd number for 'SAME' padding + assert (kernel_size - 1) % 2 == 0 + + self.pointwise_conv1 = nn.Conv1d( + channels, + 2 * channels, + kernel_size=1, + stride=1, + padding=0, + bias=bias, + ) + self.depthwise_conv = nn.Conv1d( + channels, + channels, + kernel_size, + stride=1, + padding=(kernel_size - 1) // 2, + groups=channels, + bias=bias, + ) + self.norm = nn.BatchNorm1d(channels) + self.pointwise_conv2 = nn.Conv1d( + channels, + channels, + kernel_size=1, + stride=1, + padding=0, + bias=bias, + ) + self.activation = activation + + def forward(self, x): + """Compute convolution module. + Args: + x (torch.Tensor): Input tensor (#batch, time, channels). + Returns: + torch.Tensor: Output tensor (#batch, time, channels). + """ + # exchange the temporal dimension and the feature dimension + x = x.transpose(1, 2) + + # GLU mechanism + x = self.pointwise_conv1(x) # (batch, 2*channel, dim) + x = nn.functional.glu(x, dim=1) # (batch, channel, dim) + + # 1D Depthwise Conv + x = self.depthwise_conv(x) + x = self.activation(self.norm(x)) + + x = self.pointwise_conv2(x) + + return x.transpose(1, 2) + + +class MultiLayeredConv1d(torch.nn.Module): + """Multi-layered conv1d for Transformer block. + This is a module of multi-leyered conv1d designed + to replace positionwise feed-forward network + in Transforner block, which is introduced in + `FastSpeech: Fast, Robust and Controllable Text to Speech`_. + .. _`FastSpeech: Fast, Robust and Controllable Text to Speech`: + https://arxiv.org/pdf/1905.09263.pdf + """ + + def __init__(self, in_chans, hidden_chans, kernel_size, dropout_rate): + """Initialize MultiLayeredConv1d module. + Args: + in_chans (int): Number of input channels. + hidden_chans (int): Number of hidden channels. 
+ kernel_size (int): Kernel size of conv1d. + dropout_rate (float): Dropout rate. + """ + super(MultiLayeredConv1d, self).__init__() + self.w_1 = torch.nn.Conv1d( + in_chans, + hidden_chans, + kernel_size, + stride=1, + padding=(kernel_size - 1) // 2, + ) + self.w_2 = torch.nn.Conv1d( + hidden_chans, + in_chans, + kernel_size, + stride=1, + padding=(kernel_size - 1) // 2, + ) + self.dropout = torch.nn.Dropout(dropout_rate) + + def forward(self, x): + """Calculate forward propagation. + Args: + x (torch.Tensor): Batch of input tensors (B, T, in_chans). + Returns: + torch.Tensor: Batch of output tensors (B, T, hidden_chans). + """ + x = torch.relu(self.w_1(x.transpose(-1, 1))).transpose(-1, 1) + return self.w_2(self.dropout(x).transpose(-1, 1)).transpose(-1, 1) + + +class Swish(torch.nn.Module): + """Construct an Swish object.""" + + def forward(self, x): + """Return Swich activation function.""" + return x * torch.sigmoid(x) + + +class EncoderLayer(nn.Module): + """Encoder layer module. + Args: + size (int): Input dimension. + self_attn (torch.nn.Module): Self-attention module instance. + `MultiHeadedAttention` or `RelPositionMultiHeadedAttention` instance + can be used as the argument. + feed_forward (torch.nn.Module): Feed-forward module instance. + `PositionwiseFeedForward`, `MultiLayeredConv1d`, or `Conv1dLinear` instance + can be used as the argument. + feed_forward_macaron (torch.nn.Module): Additional feed-forward module instance. + `PositionwiseFeedForward`, `MultiLayeredConv1d`, or `Conv1dLinear` instance + can be used as the argument. + conv_module (torch.nn.Module): Convolution module instance. + `ConvlutionModule` instance can be used as the argument. + dropout_rate (float): Dropout rate. + normalize_before (bool): Whether to use layer_norm before the first block. + concat_after (bool): Whether to concat attention layer's input and output. + if True, additional linear will be applied. + i.e. x -> x + linear(concat(x, att(x))) + if False, no additional linear will be applied. i.e. x -> x + att(x) + """ + + def __init__( + self, + size, + self_attn, + feed_forward, + feed_forward_macaron, + conv_module, + dropout_rate, + normalize_before=True, + concat_after=False, + ): + """Construct an EncoderLayer object.""" + super(EncoderLayer, self).__init__() + self.self_attn = self_attn + self.feed_forward = feed_forward + self.feed_forward_macaron = feed_forward_macaron + self.conv_module = conv_module + self.norm_ff = LayerNorm(size) # for the FNN module + self.norm_mha = LayerNorm(size) # for the MHA module + if feed_forward_macaron is not None: + self.norm_ff_macaron = LayerNorm(size) + self.ff_scale = 0.5 + else: + self.ff_scale = 1.0 + if self.conv_module is not None: + self.norm_conv = LayerNorm(size) # for the CNN module + self.norm_final = LayerNorm(size) # for the final output of the block + self.dropout = nn.Dropout(dropout_rate) + self.size = size + self.normalize_before = normalize_before + self.concat_after = concat_after + if self.concat_after: + self.concat_linear = nn.Linear(size + size, size) + + def forward(self, x_input, mask, cache=None): + """Compute encoded features. + Args: + x_input (Union[Tuple, torch.Tensor]): Input tensor w/ or w/o pos emb. + - w/ pos emb: Tuple of tensors [(#batch, time, size), (1, time, size)]. + - w/o pos emb: Tensor (#batch, time, size). + mask (torch.Tensor): Mask tensor for the input (#batch, time). + cache (torch.Tensor): Cache tensor of the input (#batch, time - 1, size). + Returns: + torch.Tensor: Output tensor (#batch, time, size). 
+ torch.Tensor: Mask tensor (#batch, time). + """ + if isinstance(x_input, tuple): + x, pos_emb = x_input[0], x_input[1] + else: + x, pos_emb = x_input, None + + # whether to use macaron style + if self.feed_forward_macaron is not None: + residual = x + if self.normalize_before: + x = self.norm_ff_macaron(x) + x = residual + self.ff_scale * self.dropout(self.feed_forward_macaron(x)) + if not self.normalize_before: + x = self.norm_ff_macaron(x) + + # multi-headed self-attention module + residual = x + if self.normalize_before: + x = self.norm_mha(x) + + if cache is None: + x_q = x + else: + assert cache.shape == (x.shape[0], x.shape[1] - 1, self.size) + x_q = x[:, -1:, :] + residual = residual[:, -1:, :] + mask = None if mask is None else mask[:, -1:, :] + + if pos_emb is not None: + x_att = self.self_attn(x_q, x, x, pos_emb, mask) + else: + x_att = self.self_attn(x_q, x, x, mask) + + if self.concat_after: + x_concat = torch.cat((x, x_att), dim=-1) + x = residual + self.concat_linear(x_concat) + else: + x = residual + self.dropout(x_att) + if not self.normalize_before: + x = self.norm_mha(x) + + # convolution module + if self.conv_module is not None: + residual = x + if self.normalize_before: + x = self.norm_conv(x) + x = residual + self.dropout(self.conv_module(x)) + if not self.normalize_before: + x = self.norm_conv(x) + + # feed forward module + residual = x + if self.normalize_before: + x = self.norm_ff(x) + x = residual + self.ff_scale * self.dropout(self.feed_forward(x)) + if not self.normalize_before: + x = self.norm_ff(x) + + if self.conv_module is not None: + x = self.norm_final(x) + + if cache is not None: + x = torch.cat([cache, x], dim=1) + + if pos_emb is not None: + return (x, pos_emb), mask + + return x, mask diff --git a/text_to_speech/modules/commons/conv.py b/text_to_speech/modules/commons/conv.py new file mode 100644 index 0000000000000000000000000000000000000000..7edf126a080767f760dc7d19a349fb9a44afeb46 --- /dev/null +++ b/text_to_speech/modules/commons/conv.py @@ -0,0 +1,167 @@ +import math +import torch +import torch.nn as nn +import torch.nn.functional as F + +from text_to_speech.modules.commons.layers import LayerNorm, Embedding + + +class LambdaLayer(nn.Module): + def __init__(self, lambd): + super(LambdaLayer, self).__init__() + self.lambd = lambd + + def forward(self, x): + return self.lambd(x) + + +def init_weights_func(m): + classname = m.__class__.__name__ + if classname.find("Conv1d") != -1: + torch.nn.init.xavier_uniform_(m.weight) + + +class ResidualBlock(nn.Module): + """Implements conv->PReLU->norm n-times""" + + def __init__(self, channels, kernel_size, dilation, n=2, norm_type='bn', dropout=0.0, + c_multiple=2, ln_eps=1e-12): + super(ResidualBlock, self).__init__() + + if norm_type == 'bn': + norm_builder = lambda: nn.BatchNorm1d(channels) + elif norm_type == 'in': + norm_builder = lambda: nn.InstanceNorm1d(channels, affine=True) + elif norm_type == 'gn': + norm_builder = lambda: nn.GroupNorm(8, channels) + elif norm_type == 'ln': + norm_builder = lambda: LayerNorm(channels, dim=1, eps=ln_eps) + else: + norm_builder = lambda: nn.Identity() + + self.blocks = [ + nn.Sequential( + norm_builder(), + nn.Conv1d(channels, c_multiple * channels, kernel_size, dilation=dilation, + padding=(dilation * (kernel_size - 1)) // 2), + LambdaLayer(lambda x: x * kernel_size ** -0.5), + nn.GELU(), + nn.Conv1d(c_multiple * channels, channels, 1, dilation=dilation), + ) + for i in range(n) + ] + + self.blocks = nn.ModuleList(self.blocks) + self.dropout = dropout + + def 
forward(self, x): + nonpadding = (x.abs().sum(1) > 0).float()[:, None, :] + for b in self.blocks: + x_ = b(x) + if self.dropout > 0 and self.training: + x_ = F.dropout(x_, self.dropout, training=self.training) + x = x + x_ + x = x * nonpadding + return x + + +class ConvBlocks(nn.Module): + """Decodes the expanded phoneme encoding into spectrograms""" + + def __init__(self, hidden_size, out_dims, dilations, kernel_size, + norm_type='ln', layers_in_block=2, c_multiple=2, + dropout=0.0, ln_eps=1e-5, + init_weights=True, is_BTC=True, num_layers=None, post_net_kernel=3): + super(ConvBlocks, self).__init__() + self.is_BTC = is_BTC + if num_layers is not None: + dilations = [1] * num_layers + self.res_blocks = nn.Sequential( + *[ResidualBlock(hidden_size, kernel_size, d, + n=layers_in_block, norm_type=norm_type, c_multiple=c_multiple, + dropout=dropout, ln_eps=ln_eps) + for d in dilations], + ) + if norm_type == 'bn': + norm = nn.BatchNorm1d(hidden_size) + elif norm_type == 'in': + norm = nn.InstanceNorm1d(hidden_size, affine=True) + elif norm_type == 'gn': + norm = nn.GroupNorm(8, hidden_size) + elif norm_type == 'ln': + norm = LayerNorm(hidden_size, dim=1, eps=ln_eps) + self.last_norm = norm + self.post_net1 = nn.Conv1d(hidden_size, out_dims, kernel_size=post_net_kernel, + padding=post_net_kernel // 2) + if init_weights: + self.apply(init_weights_func) + + def forward(self, x, nonpadding=None): + """ + + :param x: [B, T, H] + :return: [B, T, H] + """ + if self.is_BTC: + x = x.transpose(1, 2) + if nonpadding is None: + nonpadding = (x.abs().sum(1) > 0).float()[:, None, :] + elif self.is_BTC: + nonpadding = nonpadding.transpose(1, 2) + x = self.res_blocks(x) * nonpadding + x = self.last_norm(x) * nonpadding + x = self.post_net1(x) * nonpadding + if self.is_BTC: + x = x.transpose(1, 2) + return x + + +class TextConvEncoder(ConvBlocks): + def __init__(self, dict_size, hidden_size, out_dims, dilations, kernel_size, + norm_type='ln', layers_in_block=2, c_multiple=2, + dropout=0.0, ln_eps=1e-5, init_weights=True, num_layers=None, post_net_kernel=3): + super().__init__(hidden_size, out_dims, dilations, kernel_size, + norm_type, layers_in_block, c_multiple, + dropout, ln_eps, init_weights, num_layers=num_layers, + post_net_kernel=post_net_kernel) + self.embed_tokens = Embedding(dict_size, hidden_size, 0) + self.embed_scale = math.sqrt(hidden_size) + + def forward(self, txt_tokens): + """ + + :param txt_tokens: [B, T] + :return: { + 'encoder_out': [B x T x C] + } + """ + x = self.embed_scale * self.embed_tokens(txt_tokens) + return super().forward(x) + + +class ConditionalConvBlocks(ConvBlocks): + def __init__(self, hidden_size, c_cond, c_out, dilations, kernel_size, + norm_type='ln', layers_in_block=2, c_multiple=2, + dropout=0.0, ln_eps=1e-5, init_weights=True, is_BTC=True, num_layers=None): + super().__init__(hidden_size, c_out, dilations, kernel_size, + norm_type, layers_in_block, c_multiple, + dropout, ln_eps, init_weights, is_BTC=False, num_layers=num_layers) + self.g_prenet = nn.Conv1d(c_cond, hidden_size, 3, padding=1) + self.is_BTC_ = is_BTC + if init_weights: + self.g_prenet.apply(init_weights_func) + + def forward(self, x, cond, nonpadding=None): + if self.is_BTC_: + x = x.transpose(1, 2) + cond = cond.transpose(1, 2) + if nonpadding is not None: + nonpadding = nonpadding.transpose(1, 2) + if nonpadding is None: + nonpadding = x.abs().sum(1)[:, None] + x = x + self.g_prenet(cond) + x = x * nonpadding + x = super(ConditionalConvBlocks, self).forward(x) # input needs to be BTC + if self.is_BTC_: 
+ x = x.transpose(1, 2) + return x diff --git a/text_to_speech/modules/commons/layers.py b/text_to_speech/modules/commons/layers.py new file mode 100644 index 0000000000000000000000000000000000000000..88e1c75876050fa05a768a5ae0467fdfc05bb006 --- /dev/null +++ b/text_to_speech/modules/commons/layers.py @@ -0,0 +1,50 @@ +import torch +from torch import nn + + +class LayerNorm(torch.nn.LayerNorm): + """Layer normalization module. + :param int nout: output dim size + :param int dim: dimension to be normalized + """ + + def __init__(self, nout, dim=-1, eps=1e-5): + """Construct an LayerNorm object.""" + super(LayerNorm, self).__init__(nout, eps=eps) + self.dim = dim + + def forward(self, x): + """Apply layer normalization. + :param torch.Tensor x: input tensor + :return: layer normalized tensor + :rtype torch.Tensor + """ + if self.dim == -1: + return super(LayerNorm, self).forward(x) + return super(LayerNorm, self).forward(x.transpose(1, -1)).transpose(1, -1) + + +class Reshape(nn.Module): + def __init__(self, *args): + super(Reshape, self).__init__() + self.shape = args + + def forward(self, x): + return x.view(self.shape) + + +class Permute(nn.Module): + def __init__(self, *args): + super(Permute, self).__init__() + self.args = args + + def forward(self, x): + return x.permute(self.args) + + +def Embedding(num_embeddings, embedding_dim, padding_idx=None): + m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx) + nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5) + if padding_idx is not None: + nn.init.constant_(m.weight[padding_idx], 0) + return m diff --git a/text_to_speech/modules/commons/nar_tts_modules.py b/text_to_speech/modules/commons/nar_tts_modules.py new file mode 100644 index 0000000000000000000000000000000000000000..fd6b53cc488b3b4407e43c703bfc180c31b39607 --- /dev/null +++ b/text_to_speech/modules/commons/nar_tts_modules.py @@ -0,0 +1,138 @@ +import torch +from torch import nn + +from text_to_speech.modules.commons.layers import LayerNorm +import torch.nn.functional as F + +class DurationPredictor(torch.nn.Module): + def __init__(self, idim, n_layers=2, n_chans=384, kernel_size=3, dropout_rate=0.1, offset=1.0): + super(DurationPredictor, self).__init__() + self.offset = offset + self.conv = torch.nn.ModuleList() + self.kernel_size = kernel_size + for idx in range(n_layers): + in_chans = idim if idx == 0 else n_chans + self.conv += [torch.nn.Sequential( + torch.nn.Conv1d(in_chans, n_chans, kernel_size, stride=1, padding=kernel_size // 2), + torch.nn.ReLU(), + LayerNorm(n_chans, dim=1), + torch.nn.Dropout(dropout_rate) + )] + self.linear = nn.Sequential(torch.nn.Linear(n_chans, 1), nn.Softplus()) + + def forward(self, x, x_padding=None): + x = x.transpose(1, -1) # (B, idim, Tmax) + for f in self.conv: + x = f(x) # (B, C, Tmax) + if x_padding is not None: + x = x * (1 - x_padding.float())[:, None, :] + + x = self.linear(x.transpose(1, -1)) # [B, T, C] + x = x * (1 - x_padding.float())[:, :, None] # (B, T, C) + x = x[..., 0] # (B, Tmax) + return x + + +class SyntaDurationPredictor(torch.nn.Module): + def __init__(self, idim, n_layers=2, n_chans=384, kernel_size=3, dropout_rate=0.1, offset=1.0): + super(SyntaDurationPredictor, self).__init__() + from text_to_speech.modules.tts.syntaspeech.syntactic_graph_encoder import GraphAuxEnc + self.graph_encoder = GraphAuxEnc(in_dim=idim, hid_dim=idim, out_dim=idim) + self.offset = offset + self.conv = torch.nn.ModuleList() + self.kernel_size = kernel_size + for idx in range(n_layers): + in_chans = idim if idx == 0 
else n_chans + self.conv += [torch.nn.Sequential( + torch.nn.Conv1d(in_chans, n_chans, kernel_size, stride=1, padding=kernel_size // 2), + torch.nn.ReLU(), + LayerNorm(n_chans, dim=1), + torch.nn.Dropout(dropout_rate) + )] + self.linear = nn.Sequential(torch.nn.Linear(n_chans, 1), nn.Softplus()) + + def forward(self, x, x_padding=None, ph2word=None, graph_lst=None, etypes_lst=None): + x = x.transpose(1, -1) # (B, idim, Tmax) + assert ph2word is not None and graph_lst is not None and etypes_lst is not None + x_graph = self.graph_encoder(graph_lst, x, ph2word, etypes_lst) + x = x + x_graph * 1. + + for f in self.conv: + x = f(x) # (B, C, Tmax) + if x_padding is not None: + x = x * (1 - x_padding.float())[:, None, :] + + x = self.linear(x.transpose(1, -1)) # [B, T, C] + x = x * (1 - x_padding.float())[:, :, None] # (B, T, C) + x = x[..., 0] # (B, Tmax) + return x + + +class LengthRegulator(torch.nn.Module): + def __init__(self, pad_value=0.0): + super(LengthRegulator, self).__init__() + self.pad_value = pad_value + + def forward(self, dur, dur_padding=None, alpha=1.0): + """ + Example (no batch dim version): + 1. dur = [2,2,3] + 2. token_idx = [[1],[2],[3]], dur_cumsum = [2,4,7], dur_cumsum_prev = [0,2,4] + 3. token_mask = [[1,1,0,0,0,0,0], + [0,0,1,1,0,0,0], + [0,0,0,0,1,1,1]] + 4. token_idx * token_mask = [[1,1,0,0,0,0,0], + [0,0,2,2,0,0,0], + [0,0,0,0,3,3,3]] + 5. (token_idx * token_mask).sum(0) = [1,1,2,2,3,3,3] + + :param dur: Batch of durations of each frame (B, T_txt) + :param dur_padding: Batch of padding of each frame (B, T_txt) + :param alpha: duration rescale coefficient + :return: + mel2ph (B, T_speech) + assert alpha > 0 + """ + dur = torch.round(dur.float() * alpha).long() + if dur_padding is not None: + dur = dur * (1 - dur_padding.long()) + token_idx = torch.arange(1, dur.shape[1] + 1)[None, :, None].to(dur.device) + dur_cumsum = torch.cumsum(dur, 1) + dur_cumsum_prev = F.pad(dur_cumsum, [1, -1], mode='constant', value=0) + + pos_idx = torch.arange(dur.sum(-1).max())[None, None].to(dur.device) + token_mask = (pos_idx >= dur_cumsum_prev[:, :, None]) & (pos_idx < dur_cumsum[:, :, None]) + mel2token = (token_idx * token_mask.long()).sum(1) + return mel2token + + +class PitchPredictor(torch.nn.Module): + def __init__(self, idim, n_layers=5, n_chans=384, odim=2, kernel_size=5, dropout_rate=0.1): + super(PitchPredictor, self).__init__() + self.conv = torch.nn.ModuleList() + self.kernel_size = kernel_size + for idx in range(n_layers): + in_chans = idim if idx == 0 else n_chans + self.conv += [torch.nn.Sequential( + torch.nn.Conv1d(in_chans, n_chans, kernel_size, padding=kernel_size // 2), + torch.nn.ReLU(), + LayerNorm(n_chans, dim=1), + torch.nn.Dropout(dropout_rate) + )] + self.linear = torch.nn.Linear(n_chans, odim) + + def forward(self, x): + """ + + :param x: [B, T, H] + :return: [B, T, H] + """ + x = x.transpose(1, -1) # (B, idim, Tmax) + for f in self.conv: + x = f(x) # (B, C, Tmax) + x = self.linear(x.transpose(1, -1)) # (B, Tmax, H) + return x + + +class EnergyPredictor(PitchPredictor): + pass diff --git a/text_to_speech/modules/commons/normalizing_flow/__pycache__/res_flow.cpython-38.pyc b/text_to_speech/modules/commons/normalizing_flow/__pycache__/res_flow.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8ca772d36f8493f599af65585b66f2f70c9e7c08 Binary files /dev/null and b/text_to_speech/modules/commons/normalizing_flow/__pycache__/res_flow.cpython-38.pyc differ diff --git a/text_to_speech/modules/commons/normalizing_flow/glow_modules.py 
b/text_to_speech/modules/commons/normalizing_flow/glow_modules.py new file mode 100644 index 0000000000000000000000000000000000000000..d6df2bd99b0f89cfbe958c08d71b83317d1d6b8e --- /dev/null +++ b/text_to_speech/modules/commons/normalizing_flow/glow_modules.py @@ -0,0 +1,362 @@ +import scipy +from torch.nn import functional as F +import torch +from torch import nn +import numpy as np +from text_to_speech.modules.commons.wavenet import WN +from text_to_speech.modules.tts.glow import utils + + +class ActNorm(nn.Module): + def __init__(self, channels, ddi=False, **kwargs): + super().__init__() + self.channels = channels + self.initialized = not ddi + + self.logs = nn.Parameter(torch.zeros(1, channels, 1)) + self.bias = nn.Parameter(torch.zeros(1, channels, 1)) + + def forward(self, x, x_mask=None, reverse=False, **kwargs): + if x_mask is None: + x_mask = torch.ones(x.size(0), 1, x.size(2)).to(device=x.device, dtype=x.dtype) + x_len = torch.sum(x_mask, [1, 2]) + if not self.initialized: + self.initialize(x, x_mask) + self.initialized = True + + if reverse: + z = (x - self.bias) * torch.exp(-self.logs) * x_mask + logdet = torch.sum(-self.logs) * x_len + else: + z = (self.bias + torch.exp(self.logs) * x) * x_mask + logdet = torch.sum(self.logs) * x_len # [b] + return z, logdet + + def store_inverse(self): + pass + + def set_ddi(self, ddi): + self.initialized = not ddi + + def initialize(self, x, x_mask): + with torch.no_grad(): + denom = torch.sum(x_mask, [0, 2]) + m = torch.sum(x * x_mask, [0, 2]) / denom + m_sq = torch.sum(x * x * x_mask, [0, 2]) / denom + v = m_sq - (m ** 2) + logs = 0.5 * torch.log(torch.clamp_min(v, 1e-6)) + + bias_init = (-m * torch.exp(-logs)).view(*self.bias.shape).to(dtype=self.bias.dtype) + logs_init = (-logs).view(*self.logs.shape).to(dtype=self.logs.dtype) + + self.bias.data.copy_(bias_init) + self.logs.data.copy_(logs_init) + + +class InvConvNear(nn.Module): + def __init__(self, channels, n_split=4, no_jacobian=False, lu=True, n_sqz=2, **kwargs): + super().__init__() + assert (n_split % 2 == 0) + self.channels = channels + self.n_split = n_split + self.n_sqz = n_sqz + self.no_jacobian = no_jacobian + + w_init = torch.qr(torch.FloatTensor(self.n_split, self.n_split).normal_())[0] + if torch.det(w_init) < 0: + w_init[:, 0] = -1 * w_init[:, 0] + self.lu = lu + if lu: + # LU decomposition can slightly speed up the inverse + np_p, np_l, np_u = scipy.linalg.lu(w_init) + np_s = np.diag(np_u) + np_sign_s = np.sign(np_s) + np_log_s = np.log(np.abs(np_s)) + np_u = np.triu(np_u, k=1) + l_mask = np.tril(np.ones(w_init.shape, dtype=float), -1) + eye = np.eye(*w_init.shape, dtype=float) + + self.register_buffer('p', torch.Tensor(np_p.astype(float))) + self.register_buffer('sign_s', torch.Tensor(np_sign_s.astype(float))) + self.l = nn.Parameter(torch.Tensor(np_l.astype(float)), requires_grad=True) + self.log_s = nn.Parameter(torch.Tensor(np_log_s.astype(float)), requires_grad=True) + self.u = nn.Parameter(torch.Tensor(np_u.astype(float)), requires_grad=True) + self.register_buffer('l_mask', torch.Tensor(l_mask)) + self.register_buffer('eye', torch.Tensor(eye)) + else: + self.weight = nn.Parameter(w_init) + + def forward(self, x, x_mask=None, reverse=False, **kwargs): + b, c, t = x.size() + assert (c % self.n_split == 0) + if x_mask is None: + x_mask = 1 + x_len = torch.ones((b,), dtype=x.dtype, device=x.device) * t + else: + x_len = torch.sum(x_mask, [1, 2]) + + x = x.view(b, self.n_sqz, c // self.n_split, self.n_split // self.n_sqz, t) + x = x.permute(0, 1, 3, 2, 
4).contiguous().view(b, self.n_split, c // self.n_split, t) + + if self.lu: + self.weight, log_s = self._get_weight() + logdet = log_s.sum() + logdet = logdet * (c / self.n_split) * x_len + else: + logdet = torch.logdet(self.weight) * (c / self.n_split) * x_len # [b] + + if reverse: + if hasattr(self, "weight_inv"): + weight = self.weight_inv + else: + weight = torch.inverse(self.weight.float()).to(dtype=self.weight.dtype) + logdet = -logdet + else: + weight = self.weight + if self.no_jacobian: + logdet = 0 + + weight = weight.view(self.n_split, self.n_split, 1, 1) + z = F.conv2d(x, weight) + + z = z.view(b, self.n_sqz, self.n_split // self.n_sqz, c // self.n_split, t) + z = z.permute(0, 1, 3, 2, 4).contiguous().view(b, c, t) * x_mask + return z, logdet + + def _get_weight(self): + l, log_s, u = self.l, self.log_s, self.u + l = l * self.l_mask + self.eye + u = u * self.l_mask.transpose(0, 1).contiguous() + torch.diag(self.sign_s * torch.exp(log_s)) + weight = torch.matmul(self.p, torch.matmul(l, u)) + return weight, log_s + + def store_inverse(self): + weight, _ = self._get_weight() + self.weight_inv = torch.inverse(weight.float()).to(next(self.parameters()).device) + + +class InvConv(nn.Module): + def __init__(self, channels, no_jacobian=False, lu=True, **kwargs): + super().__init__() + w_shape = [channels, channels] + w_init = np.linalg.qr(np.random.randn(*w_shape))[0].astype(float) + LU_decomposed = lu + if not LU_decomposed: + # Sample a random orthogonal matrix: + self.register_parameter("weight", nn.Parameter(torch.Tensor(w_init))) + else: + np_p, np_l, np_u = scipy.linalg.lu(w_init) + np_s = np.diag(np_u) + np_sign_s = np.sign(np_s) + np_log_s = np.log(np.abs(np_s)) + np_u = np.triu(np_u, k=1) + l_mask = np.tril(np.ones(w_shape, dtype=float), -1) + eye = np.eye(*w_shape, dtype=float) + + self.register_buffer('p', torch.Tensor(np_p.astype(float))) + self.register_buffer('sign_s', torch.Tensor(np_sign_s.astype(float))) + self.l = nn.Parameter(torch.Tensor(np_l.astype(float))) + self.log_s = nn.Parameter(torch.Tensor(np_log_s.astype(float))) + self.u = nn.Parameter(torch.Tensor(np_u.astype(float))) + self.l_mask = torch.Tensor(l_mask) + self.eye = torch.Tensor(eye) + self.w_shape = w_shape + self.LU = LU_decomposed + self.weight = None + + def get_weight(self, device, reverse): + w_shape = self.w_shape + self.p = self.p.to(device) + self.sign_s = self.sign_s.to(device) + self.l_mask = self.l_mask.to(device) + self.eye = self.eye.to(device) + l = self.l * self.l_mask + self.eye + u = self.u * self.l_mask.transpose(0, 1).contiguous() + torch.diag(self.sign_s * torch.exp(self.log_s)) + dlogdet = self.log_s.sum() + if not reverse: + w = torch.matmul(self.p, torch.matmul(l, u)) + else: + l = torch.inverse(l.double()).float() + u = torch.inverse(u.double()).float() + w = torch.matmul(u, torch.matmul(l, self.p.inverse())) + return w.view(w_shape[0], w_shape[1], 1), dlogdet + + def forward(self, x, x_mask=None, reverse=False, **kwargs): + """ + log-det = log|abs(|W|)| * pixels + """ + b, c, t = x.size() + if x_mask is None: + x_len = torch.ones((b,), dtype=x.dtype, device=x.device) * t + else: + x_len = torch.sum(x_mask, [1, 2]) + logdet = 0 + if not reverse: + weight, dlogdet = self.get_weight(x.device, reverse) + z = F.conv1d(x, weight) + if logdet is not None: + logdet = logdet + dlogdet * x_len + return z, logdet + else: + if self.weight is None: + weight, dlogdet = self.get_weight(x.device, reverse) + else: + weight, dlogdet = self.weight, self.dlogdet + z = F.conv1d(x, weight) + if 
logdet is not None: + logdet = logdet - dlogdet * x_len + return z, logdet + + def store_inverse(self): + self.weight, self.dlogdet = self.get_weight('cuda', reverse=True) + + +class CouplingBlock(nn.Module): + def __init__(self, in_channels, hidden_channels, kernel_size, dilation_rate, n_layers, + gin_channels=0, p_dropout=0, sigmoid_scale=False, wn=None): + super().__init__() + self.in_channels = in_channels + self.hidden_channels = hidden_channels + self.kernel_size = kernel_size + self.dilation_rate = dilation_rate + self.n_layers = n_layers + self.gin_channels = gin_channels + self.p_dropout = p_dropout + self.sigmoid_scale = sigmoid_scale + + start = torch.nn.Conv1d(in_channels // 2, hidden_channels, 1) + start = torch.nn.utils.weight_norm(start) + self.start = start + # Initializing last layer to 0 makes the affine coupling layers + # do nothing at first. This helps with training stability + end = torch.nn.Conv1d(hidden_channels, in_channels, 1) + end.weight.data.zero_() + end.bias.data.zero_() + self.end = end + self.wn = WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels, p_dropout) + if wn is not None: + self.wn.in_layers = wn.in_layers + self.wn.res_skip_layers = wn.res_skip_layers + + def forward(self, x, x_mask=None, reverse=False, g=None, **kwargs): + if x_mask is None: + x_mask = 1 + x_0, x_1 = x[:, :self.in_channels // 2], x[:, self.in_channels // 2:] + + x = self.start(x_0) * x_mask + x = self.wn(x, x_mask, g) + out = self.end(x) + + z_0 = x_0 + m = out[:, :self.in_channels // 2, :] + logs = out[:, self.in_channels // 2:, :] + if self.sigmoid_scale: + logs = torch.log(1e-6 + torch.sigmoid(logs + 2)) + if reverse: + z_1 = (x_1 - m) * torch.exp(-logs) * x_mask + logdet = torch.sum(-logs * x_mask, [1, 2]) + else: + z_1 = (m + torch.exp(logs) * x_1) * x_mask + logdet = torch.sum(logs * x_mask, [1, 2]) + z = torch.cat([z_0, z_1], 1) + return z, logdet + + def store_inverse(self): + self.wn.remove_weight_norm() + + +class Glow(nn.Module): + def __init__(self, + in_channels, + hidden_channels, + kernel_size, + dilation_rate, + n_blocks, + n_layers, + p_dropout=0., + n_split=4, + n_sqz=2, + sigmoid_scale=False, + gin_channels=0, + inv_conv_type='near', + share_cond_layers=False, + share_wn_layers=0, + ): + super().__init__() + + self.in_channels = in_channels + self.hidden_channels = hidden_channels + self.kernel_size = kernel_size + self.dilation_rate = dilation_rate + self.n_blocks = n_blocks + self.n_layers = n_layers + self.p_dropout = p_dropout + self.n_split = n_split + self.n_sqz = n_sqz + self.sigmoid_scale = sigmoid_scale + self.gin_channels = gin_channels + self.share_cond_layers = share_cond_layers + if gin_channels != 0 and share_cond_layers: + cond_layer = torch.nn.Conv1d(gin_channels * n_sqz, 2 * hidden_channels * n_layers, 1) + self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight') + wn = None + self.flows = nn.ModuleList() + for b in range(n_blocks): + self.flows.append(ActNorm(channels=in_channels * n_sqz)) + if inv_conv_type == 'near': + self.flows.append(InvConvNear(channels=in_channels * n_sqz, n_split=n_split, n_sqz=n_sqz)) + if inv_conv_type == 'invconv': + self.flows.append(InvConv(channels=in_channels * n_sqz)) + if share_wn_layers > 0: + if b % share_wn_layers == 0: + wn = WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels * n_sqz, + p_dropout, share_cond_layers) + self.flows.append( + CouplingBlock( + in_channels * n_sqz, + hidden_channels, + kernel_size=kernel_size, + dilation_rate=dilation_rate, + 
n_layers=n_layers, + gin_channels=gin_channels * n_sqz, + p_dropout=p_dropout, + sigmoid_scale=sigmoid_scale, + wn=wn + )) + + def forward(self, x, x_mask=None, g=None, reverse=False, return_hiddens=False): + logdet_tot = 0 + if not reverse: + flows = self.flows + else: + flows = reversed(self.flows) + if return_hiddens: + hs = [] + if self.n_sqz > 1: + x, x_mask_ = utils.squeeze(x, x_mask, self.n_sqz) + if g is not None: + g, _ = utils.squeeze(g, x_mask, self.n_sqz) + x_mask = x_mask_ + if self.share_cond_layers and g is not None: + g = self.cond_layer(g) + for f in flows: + x, logdet = f(x, x_mask, g=g, reverse=reverse) + if return_hiddens: + hs.append(x) + logdet_tot += logdet + if self.n_sqz > 1: + x, x_mask = utils.unsqueeze(x, x_mask, self.n_sqz) + if return_hiddens: + return x, logdet_tot, hs + return x, logdet_tot + + def store_inverse(self): + def remove_weight_norm(m): + try: + nn.utils.remove_weight_norm(m) + except ValueError: # this module didn't have weight norm + return + + self.apply(remove_weight_norm) + for f in self.flows: + f.store_inverse() diff --git a/text_to_speech/modules/commons/normalizing_flow/res_flow.py b/text_to_speech/modules/commons/normalizing_flow/res_flow.py new file mode 100644 index 0000000000000000000000000000000000000000..a237b05b7fc0fd8626d3da95ad3a39171e7129fc --- /dev/null +++ b/text_to_speech/modules/commons/normalizing_flow/res_flow.py @@ -0,0 +1,61 @@ +import torch +from torch import nn +from text_to_speech.modules.commons.conv import ConditionalConvBlocks +from text_to_speech.modules.commons.wavenet import WN + + +class FlipLayer(nn.Module): + def forward(self, x, nonpadding, cond=None, reverse=False): + x = torch.flip(x, [1]) + return x + + +class CouplingLayer(nn.Module): + def __init__(self, c_in, hidden_size, kernel_size, n_layers, p_dropout=0, c_in_g=0, nn_type='wn'): + super().__init__() + self.channels = c_in + self.hidden_size = hidden_size + self.kernel_size = kernel_size + self.n_layers = n_layers + self.c_half = c_in // 2 + + self.pre = nn.Conv1d(self.c_half, hidden_size, 1) + if nn_type == 'wn': + self.enc = WN(hidden_size, kernel_size, 1, n_layers, p_dropout=p_dropout, + c_cond=c_in_g) + elif nn_type == 'conv': + self.enc = ConditionalConvBlocks( + hidden_size, c_in_g, hidden_size, None, kernel_size, + layers_in_block=1, is_BTC=False, num_layers=n_layers) + self.post = nn.Conv1d(hidden_size, self.c_half, 1) + + def forward(self, x, nonpadding, cond=None, reverse=False): + x0, x1 = x[:, :self.c_half], x[:, self.c_half:] + x_ = self.pre(x0) * nonpadding + x_ = self.enc(x_, nonpadding=nonpadding, cond=cond) + m = self.post(x_) + x1 = m + x1 if not reverse else x1 - m + x = torch.cat([x0, x1], 1) + return x * nonpadding + + +class ResFlow(nn.Module): + def __init__(self, + c_in, + hidden_size, + kernel_size, + n_flow_layers, + n_flow_steps=4, + c_cond=0, + nn_type='wn'): + super().__init__() + self.flows = nn.ModuleList() + for i in range(n_flow_steps): + self.flows.append( + CouplingLayer(c_in, hidden_size, kernel_size, n_flow_layers, c_in_g=c_cond, nn_type=nn_type)) + self.flows.append(FlipLayer()) + + def forward(self, x, nonpadding, cond=None, reverse=False): + for flow in (self.flows if not reverse else reversed(self.flows)): + x = flow(x, nonpadding, cond=cond, reverse=reverse) + return x diff --git a/text_to_speech/modules/commons/normalizing_flow/utils.py b/text_to_speech/modules/commons/normalizing_flow/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..7eb56ec514bff822ba1a19a6474207ed82492410 --- 
/dev/null +++ b/text_to_speech/modules/commons/normalizing_flow/utils.py @@ -0,0 +1,29 @@ +import torch + + +def squeeze(x, x_mask=None, n_sqz=2): + b, c, t = x.size() + + t = (t // n_sqz) * n_sqz + x = x[:, :, :t] + x_sqz = x.view(b, c, t // n_sqz, n_sqz) + x_sqz = x_sqz.permute(0, 3, 1, 2).contiguous().view(b, c * n_sqz, t // n_sqz) + + if x_mask is not None: + x_mask = x_mask[:, :, n_sqz - 1::n_sqz] + else: + x_mask = torch.ones(b, 1, t // n_sqz).to(device=x.device, dtype=x.dtype) + return x_sqz * x_mask, x_mask + + +def unsqueeze(x, x_mask=None, n_sqz=2): + b, c, t = x.size() + + x_unsqz = x.view(b, n_sqz, c // n_sqz, t) + x_unsqz = x_unsqz.permute(0, 2, 3, 1).contiguous().view(b, c // n_sqz, t * n_sqz) + + if x_mask is not None: + x_mask = x_mask.unsqueeze(-1).repeat(1, 1, 1, n_sqz).view(b, 1, t * n_sqz) + else: + x_mask = torch.ones(b, 1, t * n_sqz).to(device=x.device, dtype=x.dtype) + return x_unsqz * x_mask, x_mask diff --git a/text_to_speech/modules/commons/rel_transformer.py b/text_to_speech/modules/commons/rel_transformer.py new file mode 100644 index 0000000000000000000000000000000000000000..621757d6d41b9fca910a30806f466b09f0d9d1bc --- /dev/null +++ b/text_to_speech/modules/commons/rel_transformer.py @@ -0,0 +1,611 @@ +import math +import torch +from torch import nn +from torch.nn import functional as F +from text_to_speech.utils.commons.hparams import hparams +from text_to_speech.modules.commons.layers import Embedding +from text_to_speech.utils.nn.seq_utils import group_hidden_by_segs, expand_word2ph + +import transformers + +def convert_pad_shape(pad_shape): + l = pad_shape[::-1] + pad_shape = [item for sublist in l for item in sublist] + return pad_shape + + +def shift_1d(x): + x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1] + return x + + +def sequence_mask(length, max_length=None): + if max_length is None: + max_length = length.max() + x = torch.arange(max_length, dtype=length.dtype, device=length.device) + return x.unsqueeze(0) < length.unsqueeze(1) + + +class Encoder(nn.Module): + def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., + window_size=None, block_length=None, pre_ln=False, **kwargs): + super().__init__() + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.window_size = window_size + self.block_length = block_length + self.pre_ln = pre_ln + + self.drop = nn.Dropout(p_dropout) + self.attn_layers = nn.ModuleList() + self.norm_layers_1 = nn.ModuleList() + self.ffn_layers = nn.ModuleList() + self.norm_layers_2 = nn.ModuleList() + for i in range(self.n_layers): + self.attn_layers.append( + MultiHeadAttention(hidden_channels, hidden_channels, n_heads, window_size=window_size, + p_dropout=p_dropout, block_length=block_length)) + self.norm_layers_1.append(LayerNorm(hidden_channels)) + self.ffn_layers.append( + FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout)) + self.norm_layers_2.append(LayerNorm(hidden_channels)) + if pre_ln: + self.last_ln = LayerNorm(hidden_channels) + + def forward(self, x, x_mask): + attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) + for i in range(self.n_layers): + x = x * x_mask + x_ = x + if self.pre_ln: + x = self.norm_layers_1[i](x) + y = self.attn_layers[i](x, x, attn_mask) + y = self.drop(y) + x = x_ + y + if not self.pre_ln: + x = self.norm_layers_1[i](x) + + x_ = x + if 
self.pre_ln: + x = self.norm_layers_2[i](x) + y = self.ffn_layers[i](x, x_mask) + y = self.drop(y) + x = x_ + y + if not self.pre_ln: + x = self.norm_layers_2[i](x) + if self.pre_ln: + x = self.last_ln(x) + x = x * x_mask + return x + + +class MultiHeadAttention(nn.Module): + def __init__(self, channels, out_channels, n_heads, window_size=None, heads_share=True, p_dropout=0., + block_length=None, proximal_bias=False, proximal_init=False): + super().__init__() + assert channels % n_heads == 0 + + self.channels = channels + self.out_channels = out_channels + self.n_heads = n_heads + self.window_size = window_size + self.heads_share = heads_share + self.block_length = block_length + self.proximal_bias = proximal_bias + self.p_dropout = p_dropout + self.attn = None + + self.k_channels = channels // n_heads + self.conv_q = nn.Conv1d(channels, channels, 1) + self.conv_k = nn.Conv1d(channels, channels, 1) + self.conv_v = nn.Conv1d(channels, channels, 1) + if window_size is not None: + n_heads_rel = 1 if heads_share else n_heads + rel_stddev = self.k_channels ** -0.5 + self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) + self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) + self.conv_o = nn.Conv1d(channels, out_channels, 1) + self.drop = nn.Dropout(p_dropout) + + nn.init.xavier_uniform_(self.conv_q.weight) + nn.init.xavier_uniform_(self.conv_k.weight) + if proximal_init: + self.conv_k.weight.data.copy_(self.conv_q.weight.data) + self.conv_k.bias.data.copy_(self.conv_q.bias.data) + nn.init.xavier_uniform_(self.conv_v.weight) + + def forward(self, x, c, attn_mask=None): + q = self.conv_q(x) + k = self.conv_k(c) + v = self.conv_v(c) + + x, self.attn = self.attention(q, k, v, mask=attn_mask) + + x = self.conv_o(x) + return x + + def attention(self, query, key, value, mask=None): + # reshape [b, d, t] -> [b, n_h, t, d_k] + b, d, t_s, t_t = (*key.size(), query.size(2)) + query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3) + key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) + value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) + + scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(self.k_channels) + if self.window_size is not None: + assert t_s == t_t, "Relative attention is only available for self-attention." + key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s) + rel_logits = self._matmul_with_relative_keys(query, key_relative_embeddings) + rel_logits = self._relative_position_to_absolute_position(rel_logits) + scores_local = rel_logits / math.sqrt(self.k_channels) + scores = scores + scores_local + if self.proximal_bias: + assert t_s == t_t, "Proximal bias is only available for self-attention." 
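The relative-position handling above relies entirely on a padding-and-reshape trick rather than gather ops. As a sanity check outside the diff, here is a minimal standalone sketch (toy sizes and names are mine, not part of the repo) that replays the same reshape `_relative_position_to_absolute_position` uses to turn per-offset logits `[b, h, l, 2l-1]` into absolute scores `[b, h, l, l]`:

```python
import torch
import torch.nn.functional as F

def rel_to_abs(x):
    """Same padding trick as _relative_position_to_absolute_position.
    x: [b, h, l, 2l-1] relative logits -> [b, h, l, l] absolute scores."""
    b, h, l, _ = x.shape
    x = F.pad(x, [0, 1])              # pad last dim -> [b, h, l, 2l]
    x = x.reshape(b, h, l * 2 * l)    # flatten the rows
    x = F.pad(x, [0, l - 1])          # -> [b, h, (l+1)*(2l-1)]
    return x.view(b, h, l + 1, 2 * l - 1)[:, :, :l, l - 1:]

# toy "logits" that just store their own relative index 0..2l-2 (illustrative only)
l = 4
rel = torch.arange(2 * l - 1, dtype=torch.float32).expand(1, 1, l, -1)
print(rel_to_abs(rel)[0, 0])
# row i, column j prints (j - i) + (l - 1): query i reads the key at offset j - i
```

Row i of the output reads the relative tensor at offset `j - i + (l - 1)`, which is exactly what lets `scores_local` be added elementwise to the content-based scores.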
+ scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype) + if mask is not None: + scores = scores.masked_fill(mask == 0, -1e4) + if self.block_length is not None: + block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length) + scores = scores * block_mask + -1e4 * (1 - block_mask) + p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s] + p_attn = self.drop(p_attn) + output = torch.matmul(p_attn, value) + if self.window_size is not None: + relative_weights = self._absolute_position_to_relative_position(p_attn) + value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s) + output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings) + output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t] + return output, p_attn + + def _matmul_with_relative_values(self, x, y): + """ + x: [b, h, l, m] + y: [h or 1, m, d] + ret: [b, h, l, d] + """ + ret = torch.matmul(x, y.unsqueeze(0)) + return ret + + def _matmul_with_relative_keys(self, x, y): + """ + x: [b, h, l, d] + y: [h or 1, m, d] + ret: [b, h, l, m] + """ + ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) + return ret + + def _get_relative_embeddings(self, relative_embeddings, length): + max_relative_position = 2 * self.window_size + 1 + # Pad first before slice to avoid using cond ops. + pad_length = max(length - (self.window_size + 1), 0) + slice_start_position = max((self.window_size + 1) - length, 0) + slice_end_position = slice_start_position + 2 * length - 1 + if pad_length > 0: + padded_relative_embeddings = F.pad( + relative_embeddings, + convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]])) + else: + padded_relative_embeddings = relative_embeddings + used_relative_embeddings = padded_relative_embeddings[:, slice_start_position:slice_end_position] + return used_relative_embeddings + + def _relative_position_to_absolute_position(self, x): + """ + x: [b, h, l, 2*l-1] + ret: [b, h, l, l] + """ + batch, heads, length, _ = x.size() + # Concat columns of pad to shift from relative to absolute indexing. + x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]])) + + # Concat extra elements so to add up to shape (len+1, 2*len-1). + x_flat = x.view([batch, heads, length * 2 * length]) + x_flat = F.pad(x_flat, convert_pad_shape([[0, 0], [0, 0], [0, length - 1]])) + + # Reshape and slice out the padded elements. + x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[:, :, :length, length - 1:] + return x_final + + def _absolute_position_to_relative_position(self, x): + """ + x: [b, h, l, l] + ret: [b, h, l, 2*l-1] + """ + batch, heads, length, _ = x.size() + # padd along column + x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]])) + x_flat = x.view([batch, heads, length ** 2 + length * (length - 1)]) + # add 0's in the beginning that will skew the elements after reshape + x_flat = F.pad(x_flat, convert_pad_shape([[0, 0], [0, 0], [length, 0]])) + x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:] + return x_final + + def _attention_bias_proximal(self, length): + """Bias for self-attention to encourage attention to close positions. + Args: + length: an integer scalar. 
+ Returns: + a Tensor with shape [1, 1, length, length] + """ + r = torch.arange(length, dtype=torch.float32) + diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) + return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0) + + +class FFN(nn.Module): + def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None): + super().__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.filter_channels = filter_channels + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.activation = activation + + self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size // 2) + self.conv_2 = nn.Conv1d(filter_channels, out_channels, 1) + self.drop = nn.Dropout(p_dropout) + + def forward(self, x, x_mask): + x = self.conv_1(x * x_mask) + if self.activation == "gelu": + x = x * torch.sigmoid(1.702 * x) + else: + x = torch.relu(x) + x = self.drop(x) + x = self.conv_2(x * x_mask) + return x * x_mask + + +class LayerNorm(nn.Module): + def __init__(self, channels, eps=1e-4): + super().__init__() + self.channels = channels + self.eps = eps + + self.gamma = nn.Parameter(torch.ones(channels)) + self.beta = nn.Parameter(torch.zeros(channels)) + + def forward(self, x): + n_dims = len(x.shape) + mean = torch.mean(x, 1, keepdim=True) + variance = torch.mean((x - mean) ** 2, 1, keepdim=True) + + x = (x - mean) * torch.rsqrt(variance + self.eps) + + shape = [1, -1] + [1] * (n_dims - 2) + x = x * self.gamma.view(*shape) + self.beta.view(*shape) + return x + + +class ConvReluNorm(nn.Module): + def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout): + super().__init__() + self.in_channels = in_channels + self.hidden_channels = hidden_channels + self.out_channels = out_channels + self.kernel_size = kernel_size + self.n_layers = n_layers + self.p_dropout = p_dropout + assert n_layers > 1, "Number of layers should be larger than 0." 
+ + self.conv_layers = nn.ModuleList() + self.norm_layers = nn.ModuleList() + self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size // 2)) + self.norm_layers.append(LayerNorm(hidden_channels)) + self.relu_drop = nn.Sequential( + nn.ReLU(), + nn.Dropout(p_dropout)) + for _ in range(n_layers - 1): + self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size // 2)) + self.norm_layers.append(LayerNorm(hidden_channels)) + self.proj = nn.Conv1d(hidden_channels, out_channels, 1) + self.proj.weight.data.zero_() + self.proj.bias.data.zero_() + + def forward(self, x, x_mask): + x_org = x + for i in range(self.n_layers): + x = self.conv_layers[i](x * x_mask) + x = self.norm_layers[i](x) + x = self.relu_drop(x) + x = x_org + self.proj(x) + return x * x_mask + + +class RelTransformerEncoder(nn.Module): + def __init__(self, + n_vocab, + out_channels, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout=0.0, + window_size=4, + block_length=None, + prenet=True, + pre_ln=True, + ): + + super().__init__() + + self.n_vocab = n_vocab + self.out_channels = out_channels + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.window_size = window_size + self.block_length = block_length + self.prenet = prenet + if n_vocab > 0: + self.emb = Embedding(n_vocab, hidden_channels, padding_idx=0) + + if prenet: + self.pre = ConvReluNorm(hidden_channels, hidden_channels, hidden_channels, + kernel_size=5, n_layers=3, p_dropout=0) + self.encoder = Encoder( + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout, + window_size=window_size, + block_length=block_length, + pre_ln=pre_ln, + ) + + def forward(self, x, x_mask=None): + if self.n_vocab > 0: + x_lengths = (x > 0).long().sum(-1) + x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h] + else: + x_lengths = (x.abs().sum(-1) > 0).long().sum(-1) + x = torch.transpose(x, 1, -1) # [b, h, t] + x_mask = torch.unsqueeze(sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) + + if self.prenet: + x = self.pre(x, x_mask) + x = self.encoder(x, x_mask) + return x.transpose(1, 2) + + +class Pooler(nn.Module): + """ + Parameter-free poolers to get the sentence embedding + 'cls': [CLS] representation with BERT/RoBERTa's MLP pooler. + 'cls_before_pooler': [CLS] representation without the original MLP pooler. + 'avg': average of the last layers' hidden states at each token. + 'avg_top2': average of the last two layers. + 'avg_first_last': average of the first and the last layers. 
+ """ + def __init__(self, pooler_type): + super().__init__() + self.pooler_type = pooler_type + assert self.pooler_type in ["cls", "cls_before_pooler", "avg", "avg_top2", "avg_first_last"], "unrecognized pooling type %s" % self.pooler_type + + def forward(self, attention_mask, outputs): + last_hidden = outputs.last_hidden_state + pooler_output = outputs.pooler_output + hidden_states = outputs.hidden_states + + if self.pooler_type in ['cls_before_pooler', 'cls']: + return last_hidden[:, 0] + elif self.pooler_type == "avg": + return ((last_hidden * attention_mask.unsqueeze(-1)).sum(1) / attention_mask.sum(-1).unsqueeze(-1)) + elif self.pooler_type == "avg_first_last": + first_hidden = hidden_states[0] + last_hidden = hidden_states[-1] + pooled_result = ((first_hidden + last_hidden) / 2.0 * attention_mask.unsqueeze(-1)).sum(1) / attention_mask.sum(-1).unsqueeze(-1) + return pooled_result + elif self.pooler_type == "avg_top2": + second_last_hidden = hidden_states[-2] + last_hidden = hidden_states[-1] + pooled_result = ((last_hidden + second_last_hidden) / 2.0 * attention_mask.unsqueeze(-1)).sum(1) / attention_mask.sum(-1).unsqueeze(-1) + return pooled_result + else: + raise NotImplementedError + + +class Similarity(nn.Module): + """ + Dot product or cosine similarity + """ + + def __init__(self, temp): + super().__init__() + self.temp = temp + self.cos = nn.CosineSimilarity(dim=-1) + self.record = None + self.pos_avg = 0.0 + self.neg_avg = 0.0 + + def forward(self, x, y): + sim = self.cos(x, y) + self.record = sim.detach() # [64,64] + min_size = min(self.record.shape[0], self.record.shape[1]) # 64 + num_item = self.record.shape[0] * self.record.shape[1] # 4096 + self.pos_avg = self.record.diag().sum() / min_size + if num_item - min_size == 0: + self.neg_avg = (self.record.sum() - self.record.diag().sum()) / 1 + return sim / self.temp + if torch.any(torch.isnan(self.record)).item() is True: + print("we got self.record has nan when compute neg_avg") + if torch.any(torch.isnan(self.record.diag())).item() is True: + print("we got self.record.diag() has nan when compute neg_avg") + self.neg_avg = (self.record.sum() - self.record.diag().sum()) / (num_item - min_size) + + return sim / self.temp + + +class BertPredictionHeadTransform(nn.Module): + def __init__(self, hidden_size): + super().__init__() + self.dense = nn.Linear(hidden_size, hidden_size) + self.transform_act_fn = F.gelu + self.LayerNorm = nn.LayerNorm(hidden_size, eps=1e-12) + + def forward(self, hidden_states): + hidden_states = self.dense(hidden_states) + hidden_states = self.transform_act_fn(hidden_states) + hidden_states = self.LayerNorm(hidden_states) + return hidden_states + + +class BertLMPredictionHead(nn.Module): + def __init__(self, hid_dim, out_dim): + super().__init__() + self.transform = BertPredictionHeadTransform(hid_dim) + self.decoder = nn.Linear(hid_dim, out_dim, bias=False) + self.bias = nn.Parameter(torch.zeros(out_dim)) + self.decoder.bias = self.bias + + def forward(self, hidden_states): + hidden_states = self.transform(hidden_states) + hidden_states = self.decoder(hidden_states) + return hidden_states + + +# V2_2 +# change add to concat. 
+# now support finetune BERT +# grad_bert=0.1 & trainable_block_idx=0 +class BERTRelTransformerEncoder(nn.Module): + def __init__(self, + n_vocab, + out_channels, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout=0.0, + window_size=4, + block_length=None, + prenet=True, + pre_ln=True, + ): + + super().__init__() + + self.n_vocab = n_vocab + self.out_channels = out_channels + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.window_size = window_size + self.block_length = block_length + self.prenet = prenet + if n_vocab > 0: + self.emb = Embedding(n_vocab, hidden_channels, padding_idx=0) + + if prenet: + self.pre = ConvReluNorm(hidden_channels, hidden_channels, hidden_channels, + kernel_size=5, n_layers=3, p_dropout=0) + self.encoder1 = Encoder( + hidden_channels, + filter_channels, + n_heads, + n_layers//2, + kernel_size, + p_dropout, + window_size=window_size, + block_length=block_length, + pre_ln=pre_ln, + ) + + self.encoder2 = Encoder( + hidden_channels, + filter_channels, + n_heads, + n_layers - n_layers//2, + kernel_size, + p_dropout, + window_size=window_size, + block_length=block_length, + pre_ln=pre_ln, + ) + + if hparams['ds_name'] in ['ljspeech', 'libritts', 'librispeech']: + model_name = 'bert-base-uncased' + elif hparams['ds_name'] in ['biaobei', 'wenetspeech']: + model_name = 'bert-base-chinese' + else: + raise NotImplementedError() + + self.tokenizer = transformers.AutoTokenizer.from_pretrained(model_name) + config = transformers.AutoConfig.from_pretrained(model_name) + if hparams.get("load_bert_from_pretrained", True): + print("Load BERT from pretrained model ...") + self.bert = transformers.AutoModel.from_pretrained(model_name,config=config) + trainable_start_block = hparams.get("bert_trainable_start_block", 0) + else: + print("Initialize BERT from scratch!") + self.bert = transformers.BertModel(config=config) + trainable_start_block = 0 + + for k, v in self.bert.named_parameters(): + if 'embeddings' in k: + v.requires_grad = False + elif 'encoder.layer' in k: + block_idx = int(k.split(".")[2]) + if block_idx < trainable_start_block: + v.requires_grad = False + else: + v.requires_grad = True + elif 'cls' in k: + v.requires_grad = True + else: + print("Unhandled key: {}, set to requires_grad...".format(k)) + v.requires_grad = True + + self.bert_combine = nn.Sequential(*[ + nn.Conv1d(768 + hidden_channels, hidden_channels, 3, 1, 1), + nn.ReLU(), + ]) + self.pooler = Pooler("avg") + self.sim = Similarity(temp=0.05) + + def forward(self, x, x_mask=None, bert_feats=None, ph2word=None, **kwargs): + if self.n_vocab > 0: + x_lengths = (x > 0).long().sum(-1) + x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h] + else: + x_lengths = (x.abs().sum(-1) > 0).long().sum(-1) + x = torch.transpose(x, 1, -1) # [b, h, t] + x_mask = torch.unsqueeze(sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) + + if self.prenet: + x = self.pre(x, x_mask) + x = self.encoder1(x, x_mask) + bert_outputs = self.bert(bert_feats['bert_input_ids'], + attention_mask=bert_feats['bert_attention_mask'], + token_type_ids=bert_feats['bert_token_type_ids'], + output_hidden_states=True) + bert_num_blocks = hparams.get("bert_num_blocks", 12) # total 1+12blocks in bert + bert_embedding = bert_outputs['hidden_states'][bert_num_blocks] + # bert_embedding = bert_outputs['last_hidden_state'] + grad_bert = 
hparams.get("grad_bert", 0.1) + bert_embedding = bert_embedding.detach() * (1-grad_bert) + bert_embedding * grad_bert + bert_word_embedding, _ = group_hidden_by_segs(bert_embedding, bert_feats['bert_token2word'], bert_feats['bert_token2word'].max().item()) + bert_ph_embedding = expand_word2ph(bert_word_embedding, ph2word) + bert_ph_embedding = bert_ph_embedding.transpose(1,2) + x = torch.cat([x, bert_ph_embedding], dim=1) + x = self.bert_combine(x) + x = self.encoder2(x, x_mask) + return x.transpose(1, 2) + + diff --git a/text_to_speech/modules/commons/rel_transformer_history.py b/text_to_speech/modules/commons/rel_transformer_history.py new file mode 100644 index 0000000000000000000000000000000000000000..9b1dfc60f6ee5174cced7c826ed7f8c3bb0a5843 --- /dev/null +++ b/text_to_speech/modules/commons/rel_transformer_history.py @@ -0,0 +1,628 @@ +import math +import torch +from torch import nn +from torch.nn import functional as F +from text_to_speech.utils.commons.hparams import hparams +from text_to_speech.modules.commons.layers import Embedding + +import transformers + +def convert_pad_shape(pad_shape): + l = pad_shape[::-1] + pad_shape = [item for sublist in l for item in sublist] + return pad_shape + + +def shift_1d(x): + x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1] + return x + + +def sequence_mask(length, max_length=None): + if max_length is None: + max_length = length.max() + x = torch.arange(max_length, dtype=length.dtype, device=length.device) + return x.unsqueeze(0) < length.unsqueeze(1) + + +class Encoder(nn.Module): + def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., + window_size=None, block_length=None, pre_ln=False, **kwargs): + super().__init__() + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.window_size = window_size + self.block_length = block_length + self.pre_ln = pre_ln + + self.drop = nn.Dropout(p_dropout) + self.attn_layers = nn.ModuleList() + self.norm_layers_1 = nn.ModuleList() + self.ffn_layers = nn.ModuleList() + self.norm_layers_2 = nn.ModuleList() + for i in range(self.n_layers): + self.attn_layers.append( + MultiHeadAttention(hidden_channels, hidden_channels, n_heads, window_size=window_size, + p_dropout=p_dropout, block_length=block_length)) + self.norm_layers_1.append(LayerNorm(hidden_channels)) + self.ffn_layers.append( + FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout)) + self.norm_layers_2.append(LayerNorm(hidden_channels)) + if pre_ln: + self.last_ln = LayerNorm(hidden_channels) + + def forward(self, x, x_mask): + attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) + for i in range(self.n_layers): + x = x * x_mask + x_ = x + if self.pre_ln: + x = self.norm_layers_1[i](x) + y = self.attn_layers[i](x, x, attn_mask) + y = self.drop(y) + x = x_ + y + if not self.pre_ln: + x = self.norm_layers_1[i](x) + + x_ = x + if self.pre_ln: + x = self.norm_layers_2[i](x) + y = self.ffn_layers[i](x, x_mask) + y = self.drop(y) + x = x_ + y + if not self.pre_ln: + x = self.norm_layers_2[i](x) + if self.pre_ln: + x = self.last_ln(x) + x = x * x_mask + return x + + +class MultiHeadAttention(nn.Module): + def __init__(self, channels, out_channels, n_heads, window_size=None, heads_share=True, p_dropout=0., + block_length=None, proximal_bias=False, proximal_init=False): + super().__init__() + assert channels % 
n_heads == 0 + + self.channels = channels + self.out_channels = out_channels + self.n_heads = n_heads + self.window_size = window_size + self.heads_share = heads_share + self.block_length = block_length + self.proximal_bias = proximal_bias + self.p_dropout = p_dropout + self.attn = None + + self.k_channels = channels // n_heads + self.conv_q = nn.Conv1d(channels, channels, 1) + self.conv_k = nn.Conv1d(channels, channels, 1) + self.conv_v = nn.Conv1d(channels, channels, 1) + if window_size is not None: + n_heads_rel = 1 if heads_share else n_heads + rel_stddev = self.k_channels ** -0.5 + self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) + self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) + self.conv_o = nn.Conv1d(channels, out_channels, 1) + self.drop = nn.Dropout(p_dropout) + + nn.init.xavier_uniform_(self.conv_q.weight) + nn.init.xavier_uniform_(self.conv_k.weight) + if proximal_init: + self.conv_k.weight.data.copy_(self.conv_q.weight.data) + self.conv_k.bias.data.copy_(self.conv_q.bias.data) + nn.init.xavier_uniform_(self.conv_v.weight) + + def forward(self, x, c, attn_mask=None): + q = self.conv_q(x) + k = self.conv_k(c) + v = self.conv_v(c) + + x, self.attn = self.attention(q, k, v, mask=attn_mask) + + x = self.conv_o(x) + return x + + def attention(self, query, key, value, mask=None): + # reshape [b, d, t] -> [b, n_h, t, d_k] + b, d, t_s, t_t = (*key.size(), query.size(2)) + query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3) + key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) + value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) + + scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(self.k_channels) + if self.window_size is not None: + assert t_s == t_t, "Relative attention is only available for self-attention." + key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s) + rel_logits = self._matmul_with_relative_keys(query, key_relative_embeddings) + rel_logits = self._relative_position_to_absolute_position(rel_logits) + scores_local = rel_logits / math.sqrt(self.k_channels) + scores = scores + scores_local + if self.proximal_bias: + assert t_s == t_t, "Proximal bias is only available for self-attention." 
+ scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype) + if mask is not None: + scores = scores.masked_fill(mask == 0, -1e4) + if self.block_length is not None: + block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length) + scores = scores * block_mask + -1e4 * (1 - block_mask) + p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s] + p_attn = self.drop(p_attn) + output = torch.matmul(p_attn, value) + if self.window_size is not None: + relative_weights = self._absolute_position_to_relative_position(p_attn) + value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s) + output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings) + output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t] + return output, p_attn + + def _matmul_with_relative_values(self, x, y): + """ + x: [b, h, l, m] + y: [h or 1, m, d] + ret: [b, h, l, d] + """ + ret = torch.matmul(x, y.unsqueeze(0)) + return ret + + def _matmul_with_relative_keys(self, x, y): + """ + x: [b, h, l, d] + y: [h or 1, m, d] + ret: [b, h, l, m] + """ + ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) + return ret + + def _get_relative_embeddings(self, relative_embeddings, length): + max_relative_position = 2 * self.window_size + 1 + # Pad first before slice to avoid using cond ops. + pad_length = max(length - (self.window_size + 1), 0) + slice_start_position = max((self.window_size + 1) - length, 0) + slice_end_position = slice_start_position + 2 * length - 1 + if pad_length > 0: + padded_relative_embeddings = F.pad( + relative_embeddings, + convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]])) + else: + padded_relative_embeddings = relative_embeddings + used_relative_embeddings = padded_relative_embeddings[:, slice_start_position:slice_end_position] + return used_relative_embeddings + + def _relative_position_to_absolute_position(self, x): + """ + x: [b, h, l, 2*l-1] + ret: [b, h, l, l] + """ + batch, heads, length, _ = x.size() + # Concat columns of pad to shift from relative to absolute indexing. + x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]])) + + # Concat extra elements so to add up to shape (len+1, 2*len-1). + x_flat = x.view([batch, heads, length * 2 * length]) + x_flat = F.pad(x_flat, convert_pad_shape([[0, 0], [0, 0], [0, length - 1]])) + + # Reshape and slice out the padded elements. + x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[:, :, :length, length - 1:] + return x_final + + def _absolute_position_to_relative_position(self, x): + """ + x: [b, h, l, l] + ret: [b, h, l, 2*l-1] + """ + batch, heads, length, _ = x.size() + # padd along column + x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]])) + x_flat = x.view([batch, heads, length ** 2 + length * (length - 1)]) + # add 0's in the beginning that will skew the elements after reshape + x_flat = F.pad(x_flat, convert_pad_shape([[0, 0], [0, 0], [length, 0]])) + x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:] + return x_final + + def _attention_bias_proximal(self, length): + """Bias for self-attention to encourage attention to close positions. + Args: + length: an integer scalar. 
+ Returns: + a Tensor with shape [1, 1, length, length] + """ + r = torch.arange(length, dtype=torch.float32) + diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) + return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0) + + +class FFN(nn.Module): + def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None): + super().__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.filter_channels = filter_channels + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.activation = activation + + self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size // 2) + self.conv_2 = nn.Conv1d(filter_channels, out_channels, 1) + self.drop = nn.Dropout(p_dropout) + + def forward(self, x, x_mask): + x = self.conv_1(x * x_mask) + if self.activation == "gelu": + x = x * torch.sigmoid(1.702 * x) + else: + x = torch.relu(x) + x = self.drop(x) + x = self.conv_2(x * x_mask) + return x * x_mask + + +class LayerNorm(nn.Module): + def __init__(self, channels, eps=1e-4): + super().__init__() + self.channels = channels + self.eps = eps + + self.gamma = nn.Parameter(torch.ones(channels)) + self.beta = nn.Parameter(torch.zeros(channels)) + + def forward(self, x): + n_dims = len(x.shape) + mean = torch.mean(x, 1, keepdim=True) + variance = torch.mean((x - mean) ** 2, 1, keepdim=True) + + x = (x - mean) * torch.rsqrt(variance + self.eps) + + shape = [1, -1] + [1] * (n_dims - 2) + x = x * self.gamma.view(*shape) + self.beta.view(*shape) + return x + + +class ConvReluNorm(nn.Module): + def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout): + super().__init__() + self.in_channels = in_channels + self.hidden_channels = hidden_channels + self.out_channels = out_channels + self.kernel_size = kernel_size + self.n_layers = n_layers + self.p_dropout = p_dropout + assert n_layers > 1, "Number of layers should be larger than 0." 
+ + self.conv_layers = nn.ModuleList() + self.norm_layers = nn.ModuleList() + self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size // 2)) + self.norm_layers.append(LayerNorm(hidden_channels)) + self.relu_drop = nn.Sequential( + nn.ReLU(), + nn.Dropout(p_dropout)) + for _ in range(n_layers - 1): + self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size // 2)) + self.norm_layers.append(LayerNorm(hidden_channels)) + self.proj = nn.Conv1d(hidden_channels, out_channels, 1) + self.proj.weight.data.zero_() + self.proj.bias.data.zero_() + + def forward(self, x, x_mask): + x_org = x + for i in range(self.n_layers): + x = self.conv_layers[i](x * x_mask) + x = self.norm_layers[i](x) + x = self.relu_drop(x) + x = x_org + self.proj(x) + return x * x_mask + + +class RelTransformerEncoder(nn.Module): + def __init__(self, + n_vocab, + out_channels, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout=0.0, + window_size=4, + block_length=None, + prenet=True, + pre_ln=True, + ): + + super().__init__() + + self.n_vocab = n_vocab + self.out_channels = out_channels + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.window_size = window_size + self.block_length = block_length + self.prenet = prenet + if n_vocab > 0: + self.emb = Embedding(n_vocab, hidden_channels, padding_idx=0) + + if prenet: + self.pre = ConvReluNorm(hidden_channels, hidden_channels, hidden_channels, + kernel_size=5, n_layers=3, p_dropout=0) + self.encoder = Encoder( + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout, + window_size=window_size, + block_length=block_length, + pre_ln=pre_ln, + ) + + def forward(self, x, x_mask=None): + if self.n_vocab > 0: + x_lengths = (x > 0).long().sum(-1) + x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h] + else: + x_lengths = (x.abs().sum(-1) > 0).long().sum(-1) + x = torch.transpose(x, 1, -1) # [b, h, t] + x_mask = torch.unsqueeze(sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) + + if self.prenet: + x = self.pre(x, x_mask) + x = self.encoder(x, x_mask) + return x.transpose(1, 2) + + +def group_hidden_by_segs(h, seg_ids, max_len): + """ + :param h: [B, T, H] + :param seg_ids: [B, T] + :return: h_ph: [B, T_ph, H] + """ + B, T, H = h.shape + h_gby_segs = h.new_zeros([B, max_len + 1, H]).scatter_add_(1, seg_ids[:, :, None].repeat([1, 1, H]), h) + all_ones = h.new_ones(h.shape[:2]) + cnt_gby_segs = h.new_zeros([B, max_len + 1]).scatter_add_(1, seg_ids, all_ones).contiguous() + h_gby_segs = h_gby_segs[:, 1:] + cnt_gby_segs = cnt_gby_segs[:, 1:] + h_gby_segs = h_gby_segs / torch.clamp(cnt_gby_segs[:, :, None], min=1) + # assert h_gby_segs.shape[-1] == 192 + return h_gby_segs + +def postprocess_word2ph(word_encoding, ph2word): + word_encoding = F.pad(word_encoding,[0,0,1,0]) + ph2word_ = ph2word[:, :, None].repeat([1, 1, word_encoding.shape[-1]]) + out = torch.gather(word_encoding, 1, ph2word_) # [B, T, H] + return out + + +class Pooler(nn.Module): + """ + Parameter-free poolers to get the sentence embedding + 'cls': [CLS] representation with BERT/RoBERTa's MLP pooler. + 'cls_before_pooler': [CLS] representation without the original MLP pooler. + 'avg': average of the last layers' hidden states at each token. + 'avg_top2': average of the last two layers. 
+    'avg_first_last': average of the first and the last layers.
+    """
+    def __init__(self, pooler_type):
+        super().__init__()
+        self.pooler_type = pooler_type
+        assert self.pooler_type in ["cls", "cls_before_pooler", "avg", "avg_top2", "avg_first_last"], "unrecognized pooling type %s" % self.pooler_type
+
+    def forward(self, attention_mask, outputs):
+        last_hidden = outputs.last_hidden_state
+        pooler_output = outputs.pooler_output
+        hidden_states = outputs.hidden_states
+
+        if self.pooler_type in ['cls_before_pooler', 'cls']:
+            return last_hidden[:, 0]
+        elif self.pooler_type == "avg":
+            return ((last_hidden * attention_mask.unsqueeze(-1)).sum(1) / attention_mask.sum(-1).unsqueeze(-1))
+        elif self.pooler_type == "avg_first_last":
+            first_hidden = hidden_states[0]
+            last_hidden = hidden_states[-1]
+            pooled_result = ((first_hidden + last_hidden) / 2.0 * attention_mask.unsqueeze(-1)).sum(1) / attention_mask.sum(-1).unsqueeze(-1)
+            return pooled_result
+        elif self.pooler_type == "avg_top2":
+            second_last_hidden = hidden_states[-2]
+            last_hidden = hidden_states[-1]
+            pooled_result = ((last_hidden + second_last_hidden) / 2.0 * attention_mask.unsqueeze(-1)).sum(1) / attention_mask.sum(-1).unsqueeze(-1)
+            return pooled_result
+        else:
+            raise NotImplementedError
+
+
+class Similarity(nn.Module):
+    """
+    Cosine similarity scaled by a temperature.
+    """
+
+    def __init__(self, temp):
+        super().__init__()
+        self.temp = temp
+        self.cos = nn.CosineSimilarity(dim=-1)
+        self.record = None
+        self.pos_avg = 0.0
+        self.neg_avg = 0.0
+
+    def forward(self, x, y):
+        sim = self.cos(x, y)
+        self.record = sim.detach()  # [B, B] cosine-similarity matrix
+        min_size = min(self.record.shape[0], self.record.shape[1])
+        num_item = self.record.shape[0] * self.record.shape[1]
+        self.pos_avg = self.record.diag().sum() / min_size
+        if num_item - min_size == 0:
+            self.neg_avg = (self.record.sum() - self.record.diag().sum()) / 1
+            return sim / self.temp
+        if torch.any(torch.isnan(self.record)).item():
+            print("NaN detected in self.record while computing neg_avg")
+        if torch.any(torch.isnan(self.record.diag())).item():
+            print("NaN detected in self.record.diag() while computing neg_avg")
+        self.neg_avg = (self.record.sum() - self.record.diag().sum()) / (num_item - min_size)
+
+        return sim / self.temp
+
+
+class BertPredictionHeadTransform(nn.Module):
+    def __init__(self, hidden_size):
+        super().__init__()
+        self.dense = nn.Linear(hidden_size, hidden_size)
+        self.transform_act_fn = F.gelu
+        self.LayerNorm = nn.LayerNorm(hidden_size, eps=1e-12)
+
+    def forward(self, hidden_states):
+        hidden_states = self.dense(hidden_states)
+        hidden_states = self.transform_act_fn(hidden_states)
+        hidden_states = self.LayerNorm(hidden_states)
+        return hidden_states
+
+
+class BertLMPredictionHead(nn.Module):
+    def __init__(self, hid_dim, out_dim):
+        super().__init__()
+        self.transform = BertPredictionHeadTransform(hid_dim)
+        self.decoder = nn.Linear(hid_dim, out_dim, bias=False)
+        self.bias = nn.Parameter(torch.zeros(out_dim))
+        # tie the decoder's bias to the learnable parameter above
+        self.decoder.bias = self.bias
+
+    def forward(self, hidden_states):
+        hidden_states = self.transform(hidden_states)
+        hidden_states = self.decoder(hidden_states)
+        return hidden_states
+
+
+# V2_2
+# changed add to concat.
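+# (word-level BERT features are pooled via group_hidden_by_segs, expanded back to the
+#  phoneme level via postprocess_word2ph, concatenated with the phoneme encoding and
+#  fused by the `bert_combine` Conv1d; gradients into BERT are scaled by hparams['grad_bert'])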
+# now support finetune BERT +# grad_bert=0.1 & trainable_block_idx=0 +class BERTRelTransformerEncoder(nn.Module): + def __init__(self, + n_vocab, + out_channels, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout=0.0, + window_size=4, + block_length=None, + prenet=True, + pre_ln=True, + ): + + super().__init__() + + self.n_vocab = n_vocab + self.out_channels = out_channels + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.window_size = window_size + self.block_length = block_length + self.prenet = prenet + if n_vocab > 0: + self.emb = Embedding(n_vocab, hidden_channels, padding_idx=0) + + if prenet: + self.pre = ConvReluNorm(hidden_channels, hidden_channels, hidden_channels, + kernel_size=5, n_layers=3, p_dropout=0) + self.encoder1 = Encoder( + hidden_channels, + filter_channels, + n_heads, + n_layers//2, + kernel_size, + p_dropout, + window_size=window_size, + block_length=block_length, + pre_ln=pre_ln, + ) + + self.encoder2 = Encoder( + hidden_channels, + filter_channels, + n_heads, + n_layers - n_layers//2, + kernel_size, + p_dropout, + window_size=window_size, + block_length=block_length, + pre_ln=pre_ln, + ) + + if hparams['ds_name'] in ['ljspeech', 'libritts']: + model_name = 'bert-base-uncased' + elif hparams['ds_name'] in ['biaobei']: + model_name = 'bert-base-chinese' + else: + raise NotImplementedError() + + config_kwargs = {'cache_dir': None, 'revision': 'main', 'use_auth_token': None} + self.tokenizer = transformers.AutoTokenizer.from_pretrained(model_name) + config = transformers.AutoConfig.from_pretrained(model_name, **config_kwargs) + self.bert = transformers.AutoModelForMaskedLM.from_pretrained( + model_name, + config=config, + ) + self.cl_head = BertLMPredictionHead(768, 768) + trainable_start_block = hparams.get("trainable_start_block", 10) + for k, v in self.bert.named_parameters(): + if 'embeddings' in k: + v.requires_grad = False + elif 'encoder.layer' in k: + block_idx = int(k.split(".")[3]) + if block_idx < trainable_start_block: + v.requires_grad = False + else: + v.requires_grad = True + elif 'cls' in k: + v.requires_grad = True + else: + print("Unhandled key: {}, set to requires_grad...".format(k)) + v.requires_grad = True + + self.bert_combine = nn.Sequential(*[ + nn.Conv1d(768 + hidden_channels, hidden_channels, 3, 1, 1), + nn.ReLU(), + ]) + self.pooler = Pooler("avg") + self.sim = Similarity(temp=0.05) + + def forward(self, x, x_mask=None, bert_feats=None, ph2word=None, **kwargs): + if self.n_vocab > 0: + x_lengths = (x > 0).long().sum(-1) + x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h] + else: + x_lengths = (x.abs().sum(-1) > 0).long().sum(-1) + x = torch.transpose(x, 1, -1) # [b, h, t] + x_mask = torch.unsqueeze(sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) + + if self.prenet: + x = self.pre(x, x_mask) + x = self.encoder1(x, x_mask) + bert_outputs = self.bert.bert(bert_feats['bert_input_ids'], + attention_mask=bert_feats['bert_attention_mask'], + token_type_ids=bert_feats['bert_token_type_ids'],) + bert_embedding = bert_outputs['last_hidden_state'] + grad_bert = hparams.get("grad_bert", 0.1) + bert_embedding = bert_embedding.detach() * (1-grad_bert) + bert_embedding * grad_bert + bert_word_embedding = group_hidden_by_segs(bert_embedding, bert_feats['bert_token2word'], bert_feats['bert_token2word'].max().item()) + bert_ph_embedding = 
postprocess_word2ph(bert_word_embedding, ph2word) + bert_ph_embedding = bert_ph_embedding.transpose(1,2) + x = torch.cat([x, bert_ph_embedding], dim=1) + x = self.bert_combine(x) + x = self.encoder2(x, x_mask) + return x.transpose(1, 2) + + diff --git a/text_to_speech/modules/commons/rnn.py b/text_to_speech/modules/commons/rnn.py new file mode 100644 index 0000000000000000000000000000000000000000..205c2c76b8fda2de920bc59228a5eec0a20119a9 --- /dev/null +++ b/text_to_speech/modules/commons/rnn.py @@ -0,0 +1,261 @@ +import torch +from torch import nn +import torch.nn.functional as F + + +class PreNet(nn.Module): + def __init__(self, in_dims, fc1_dims=256, fc2_dims=128, dropout=0.5): + super().__init__() + self.fc1 = nn.Linear(in_dims, fc1_dims) + self.fc2 = nn.Linear(fc1_dims, fc2_dims) + self.p = dropout + + def forward(self, x): + x = self.fc1(x) + x = F.relu(x) + x = F.dropout(x, self.p, training=self.training) + x = self.fc2(x) + x = F.relu(x) + x = F.dropout(x, self.p, training=self.training) + return x + + +class HighwayNetwork(nn.Module): + def __init__(self, size): + super().__init__() + self.W1 = nn.Linear(size, size) + self.W2 = nn.Linear(size, size) + self.W1.bias.data.fill_(0.) + + def forward(self, x): + x1 = self.W1(x) + x2 = self.W2(x) + g = torch.sigmoid(x2) + y = g * F.relu(x1) + (1. - g) * x + return y + + +class BatchNormConv(nn.Module): + def __init__(self, in_channels, out_channels, kernel, relu=True): + super().__init__() + self.conv = nn.Conv1d(in_channels, out_channels, kernel, stride=1, padding=kernel // 2, bias=False) + self.bnorm = nn.BatchNorm1d(out_channels) + self.relu = relu + + def forward(self, x): + x = self.conv(x) + x = F.relu(x) if self.relu is True else x + return self.bnorm(x) + + +class ConvNorm(torch.nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, + padding=None, dilation=1, bias=True, w_init_gain='linear'): + super(ConvNorm, self).__init__() + if padding is None: + assert (kernel_size % 2 == 1) + padding = int(dilation * (kernel_size - 1) / 2) + + self.conv = torch.nn.Conv1d(in_channels, out_channels, + kernel_size=kernel_size, stride=stride, + padding=padding, dilation=dilation, + bias=bias) + + torch.nn.init.xavier_uniform_( + self.conv.weight, gain=torch.nn.init.calculate_gain(w_init_gain)) + + def forward(self, signal): + conv_signal = self.conv(signal) + return conv_signal + + +class CBHG(nn.Module): + def __init__(self, K, in_channels, channels, proj_channels, num_highways): + super().__init__() + + # List of all rnns to call `flatten_parameters()` on + self._to_flatten = [] + + self.bank_kernels = [i for i in range(1, K + 1)] + self.conv1d_bank = nn.ModuleList() + for k in self.bank_kernels: + conv = BatchNormConv(in_channels, channels, k) + self.conv1d_bank.append(conv) + + self.maxpool = nn.MaxPool1d(kernel_size=2, stride=1, padding=1) + + self.conv_project1 = BatchNormConv(len(self.bank_kernels) * channels, proj_channels[0], 3) + self.conv_project2 = BatchNormConv(proj_channels[0], proj_channels[1], 3, relu=False) + + # Fix the highway input if necessary + if proj_channels[-1] != channels: + self.highway_mismatch = True + self.pre_highway = nn.Linear(proj_channels[-1], channels, bias=False) + else: + self.highway_mismatch = False + + self.highways = nn.ModuleList() + for i in range(num_highways): + hn = HighwayNetwork(channels) + self.highways.append(hn) + + self.rnn = nn.GRU(channels, channels, batch_first=True, bidirectional=True) + self._to_flatten.append(self.rnn) + + # Avoid fragmentation of RNN 
parameters and associated warning + self._flatten_parameters() + + def forward(self, x): + # Although we `_flatten_parameters()` on init, when using DataParallel + # the model gets replicated, making it no longer guaranteed that the + # weights are contiguous in GPU memory. Hence, we must call it again + self._flatten_parameters() + + # Save these for later + residual = x + seq_len = x.size(-1) + conv_bank = [] + + # Convolution Bank + for conv in self.conv1d_bank: + c = conv(x) # Convolution + conv_bank.append(c[:, :, :seq_len]) + + # Stack along the channel axis + conv_bank = torch.cat(conv_bank, dim=1) + + # dump the last padding to fit residual + x = self.maxpool(conv_bank)[:, :, :seq_len] + + # Conv1d projections + x = self.conv_project1(x) + x = self.conv_project2(x) + + # Residual Connect + x = x + residual + + # Through the highways + x = x.transpose(1, 2) + if self.highway_mismatch is True: + x = self.pre_highway(x) + for h in self.highways: + x = h(x) + + # And then the RNN + x, _ = self.rnn(x) + return x + + def _flatten_parameters(self): + """Calls `flatten_parameters` on all the rnns used by the WaveRNN. Used + to improve efficiency and avoid PyTorch yelling at us.""" + [m.flatten_parameters() for m in self._to_flatten] + + +class TacotronEncoder(nn.Module): + def __init__(self, embed_dims, num_chars, cbhg_channels, K, num_highways, dropout): + super().__init__() + self.embedding = nn.Embedding(num_chars, embed_dims) + self.pre_net = PreNet(embed_dims, embed_dims, embed_dims, dropout=dropout) + self.cbhg = CBHG(K=K, in_channels=cbhg_channels, channels=cbhg_channels, + proj_channels=[cbhg_channels, cbhg_channels], + num_highways=num_highways) + self.proj_out = nn.Linear(cbhg_channels * 2, cbhg_channels) + + def forward(self, x): + x = self.embedding(x) + x = self.pre_net(x) + x.transpose_(1, 2) + x = self.cbhg(x) + x = self.proj_out(x) + return x + + +class RNNEncoder(nn.Module): + def __init__(self, num_chars, embedding_dim, n_convolutions=3, kernel_size=5): + super(RNNEncoder, self).__init__() + self.embedding = nn.Embedding(num_chars, embedding_dim, padding_idx=0) + convolutions = [] + for _ in range(n_convolutions): + conv_layer = nn.Sequential( + ConvNorm(embedding_dim, + embedding_dim, + kernel_size=kernel_size, stride=1, + padding=int((kernel_size - 1) / 2), + dilation=1, w_init_gain='relu'), + nn.BatchNorm1d(embedding_dim)) + convolutions.append(conv_layer) + self.convolutions = nn.ModuleList(convolutions) + + self.lstm = nn.LSTM(embedding_dim, int(embedding_dim / 2), 1, + batch_first=True, bidirectional=True) + + def forward(self, x): + input_lengths = (x > 0).sum(-1) + input_lengths = input_lengths.cpu().numpy() + + x = self.embedding(x) + x = x.transpose(1, 2) # [B, H, T] + for conv in self.convolutions: + x = F.dropout(F.relu(conv(x)), 0.5, self.training) + x + x = x.transpose(1, 2) # [B, T, H] + + # pytorch tensor are not reversible, hence the conversion + x = nn.utils.rnn.pack_padded_sequence(x, input_lengths, batch_first=True, enforce_sorted=False) + + self.lstm.flatten_parameters() + outputs, _ = self.lstm(x) + outputs, _ = nn.utils.rnn.pad_packed_sequence(outputs, batch_first=True) + + return outputs + + +class DecoderRNN(torch.nn.Module): + def __init__(self, hidden_size, decoder_rnn_dim, dropout): + super(DecoderRNN, self).__init__() + self.in_conv1d = nn.Sequential( + torch.nn.Conv1d( + in_channels=hidden_size, + out_channels=hidden_size, + kernel_size=9, padding=4, + ), + torch.nn.ReLU(), + torch.nn.Conv1d( + in_channels=hidden_size, + 
out_channels=hidden_size, + kernel_size=9, padding=4, + ), + ) + self.ln = nn.LayerNorm(hidden_size) + if decoder_rnn_dim == 0: + decoder_rnn_dim = hidden_size * 2 + self.rnn = torch.nn.LSTM( + input_size=hidden_size, + hidden_size=decoder_rnn_dim, + num_layers=1, + batch_first=True, + bidirectional=True, + dropout=dropout + ) + self.rnn.flatten_parameters() + self.conv1d = torch.nn.Conv1d( + in_channels=decoder_rnn_dim * 2, + out_channels=hidden_size, + kernel_size=3, + padding=1, + ) + + def forward(self, x): + input_masks = x.abs().sum(-1).ne(0).data[:, :, None] + input_lengths = input_masks.sum([-1, -2]) + input_lengths = input_lengths.cpu().numpy() + + x = self.in_conv1d(x.transpose(1, 2)).transpose(1, 2) + x = self.ln(x) + x = nn.utils.rnn.pack_padded_sequence(x, input_lengths, batch_first=True, enforce_sorted=False) + self.rnn.flatten_parameters() + x, _ = self.rnn(x) # [B, T, C] + x, _ = nn.utils.rnn.pad_packed_sequence(x, batch_first=True) + x = x * input_masks + pre_mel = self.conv1d(x.transpose(1, 2)).transpose(1, 2) # [B, T, C] + pre_mel = pre_mel * input_masks + return pre_mel diff --git a/text_to_speech/modules/commons/transformer.py b/text_to_speech/modules/commons/transformer.py new file mode 100644 index 0000000000000000000000000000000000000000..a7920480cf23606128e6662511cfd7a8a3d9f896 --- /dev/null +++ b/text_to_speech/modules/commons/transformer.py @@ -0,0 +1,747 @@ +import math +import torch +from torch import nn +from torch.nn import Parameter, Linear +from text_to_speech.modules.commons.layers import LayerNorm, Embedding +from text_to_speech.utils.nn.seq_utils import get_incremental_state, set_incremental_state, softmax, make_positions +import torch.nn.functional as F + +DEFAULT_MAX_SOURCE_POSITIONS = 2000 +DEFAULT_MAX_TARGET_POSITIONS = 2000 + + +class SinusoidalPositionalEmbedding(nn.Module): + """This module produces sinusoidal positional embeddings of any length. + + Padding symbols are ignored. + """ + + def __init__(self, embedding_dim, padding_idx, init_size=1024): + super().__init__() + self.embedding_dim = embedding_dim + self.padding_idx = padding_idx + self.weights = SinusoidalPositionalEmbedding.get_embedding( + init_size, + embedding_dim, + padding_idx, + ) + self.register_buffer('_float_tensor', torch.FloatTensor(1)) + + @staticmethod + def get_embedding(num_embeddings, embedding_dim, padding_idx=None): + """Build sinusoidal embeddings. + + This matches the implementation in tensor2tensor, but differs slightly + from the description in Section 3.5 of "Attention Is All You Need". 
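+        Here the sin terms occupy the first half of each embedding and the cos terms
+        the second half (concatenated rather than interleaved as in the paper).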
+ """ + half_dim = embedding_dim // 2 + emb = math.log(10000) / (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb) + emb = torch.arange(num_embeddings, dtype=torch.float).unsqueeze(1) * emb.unsqueeze(0) + emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1) + if embedding_dim % 2 == 1: + # zero pad + emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1) + if padding_idx is not None: + emb[padding_idx, :] = 0 + return emb + + def forward(self, input, incremental_state=None, timestep=None, positions=None, **kwargs): + """Input is expected to be of size [bsz x seqlen].""" + bsz, seq_len = input.shape[:2] + max_pos = self.padding_idx + 1 + seq_len + if self.weights is None or max_pos > self.weights.size(0): + # recompute/expand embeddings if needed + self.weights = SinusoidalPositionalEmbedding.get_embedding( + max_pos, + self.embedding_dim, + self.padding_idx, + ) + self.weights = self.weights.to(self._float_tensor) + + if incremental_state is not None: + # positions is the same for every token when decoding a single step + pos = timestep.view(-1)[0] + 1 if timestep is not None else seq_len + return self.weights[self.padding_idx + pos, :].expand(bsz, 1, -1) + + positions = make_positions(input, self.padding_idx) if positions is None else positions + return self.weights.index_select(0, positions.view(-1)).view(bsz, seq_len, -1).detach() + + def max_positions(self): + """Maximum number of supported positions.""" + return int(1e5) # an arbitrary large number + + +class TransformerFFNLayer(nn.Module): + def __init__(self, hidden_size, filter_size, padding="SAME", kernel_size=1, dropout=0., act='gelu'): + super().__init__() + self.kernel_size = kernel_size + self.dropout = dropout + self.act = act + if padding == 'SAME': + self.ffn_1 = nn.Conv1d(hidden_size, filter_size, kernel_size, padding=kernel_size // 2) + elif padding == 'LEFT': + self.ffn_1 = nn.Sequential( + nn.ConstantPad1d((kernel_size - 1, 0), 0.0), + nn.Conv1d(hidden_size, filter_size, kernel_size) + ) + self.ffn_2 = Linear(filter_size, hidden_size) + + def forward(self, x, incremental_state=None): + # x: T x B x C + if incremental_state is not None: + saved_state = self._get_input_buffer(incremental_state) + if 'prev_input' in saved_state: + prev_input = saved_state['prev_input'] + x = torch.cat((prev_input, x), dim=0) + x = x[-self.kernel_size:] + saved_state['prev_input'] = x + self._set_input_buffer(incremental_state, saved_state) + + x = self.ffn_1(x.permute(1, 2, 0)).permute(2, 0, 1) + x = x * self.kernel_size ** -0.5 + + if incremental_state is not None: + x = x[-1:] + if self.act == 'gelu': + x = F.gelu(x) + if self.act == 'relu': + x = F.relu(x) + x = F.dropout(x, self.dropout, training=self.training) + x = self.ffn_2(x) + return x + + def _get_input_buffer(self, incremental_state): + return get_incremental_state( + self, + incremental_state, + 'f', + ) or {} + + def _set_input_buffer(self, incremental_state, buffer): + set_incremental_state( + self, + incremental_state, + 'f', + buffer, + ) + + def clear_buffer(self, incremental_state): + if incremental_state is not None: + saved_state = self._get_input_buffer(incremental_state) + if 'prev_input' in saved_state: + del saved_state['prev_input'] + self._set_input_buffer(incremental_state, saved_state) + + +class MultiheadAttention(nn.Module): + def __init__(self, embed_dim, num_heads, kdim=None, vdim=None, dropout=0., bias=True, + add_bias_kv=False, add_zero_attn=False, self_attention=False, + 
encoder_decoder_attention=False): + super().__init__() + self.embed_dim = embed_dim + self.kdim = kdim if kdim is not None else embed_dim + self.vdim = vdim if vdim is not None else embed_dim + self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim + + self.num_heads = num_heads + self.dropout = dropout + self.head_dim = embed_dim // num_heads + assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads" + self.scaling = self.head_dim ** -0.5 + + self.self_attention = self_attention + self.encoder_decoder_attention = encoder_decoder_attention + + assert not self.self_attention or self.qkv_same_dim, 'Self-attention requires query, key and ' \ + 'value to be of the same size' + + if self.qkv_same_dim: + self.in_proj_weight = Parameter(torch.Tensor(3 * embed_dim, embed_dim)) + else: + self.k_proj_weight = Parameter(torch.Tensor(embed_dim, self.kdim)) + self.v_proj_weight = Parameter(torch.Tensor(embed_dim, self.vdim)) + self.q_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim)) + + if bias: + self.in_proj_bias = Parameter(torch.Tensor(3 * embed_dim)) + else: + self.register_parameter('in_proj_bias', None) + + self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + + if add_bias_kv: + self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim)) + self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim)) + else: + self.bias_k = self.bias_v = None + + self.add_zero_attn = add_zero_attn + + self.reset_parameters() + + self.enable_torch_version = False + if hasattr(F, "multi_head_attention_forward"): + self.enable_torch_version = True + else: + self.enable_torch_version = False + self.last_attn_probs = None + + def reset_parameters(self): + if self.qkv_same_dim: + nn.init.xavier_uniform_(self.in_proj_weight) + else: + nn.init.xavier_uniform_(self.k_proj_weight) + nn.init.xavier_uniform_(self.v_proj_weight) + nn.init.xavier_uniform_(self.q_proj_weight) + + nn.init.xavier_uniform_(self.out_proj.weight) + if self.in_proj_bias is not None: + nn.init.constant_(self.in_proj_bias, 0.) + nn.init.constant_(self.out_proj.bias, 0.) + if self.bias_k is not None: + nn.init.xavier_normal_(self.bias_k) + if self.bias_v is not None: + nn.init.xavier_normal_(self.bias_v) + + def forward( + self, + query, key, value, + key_padding_mask=None, + incremental_state=None, + need_weights=True, + static_kv=False, + attn_mask=None, + before_softmax=False, + need_head_weights=False, + enc_dec_attn_constraint_mask=None, + reset_attn_weight=None + ): + """Input shape: Time x Batch x Channel + + Args: + key_padding_mask (ByteTensor, optional): mask to exclude + keys that are pads, of shape `(batch, src_len)`, where + padding elements are indicated by 1s. + need_weights (bool, optional): return the attention weights, + averaged over heads (default: False). + attn_mask (ByteTensor, optional): typically used to + implement causal attention, where the mask prevents the + attention from looking forward in time (default: None). + before_softmax (bool, optional): return the raw attention + weights and values before the attention softmax. + need_head_weights (bool, optional): return the attention + weights for each head. Implies *need_weights*. Default: + return the average attention weights over all heads. 
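+
+        When F.multi_head_attention_forward is available and neither incremental
+        decoding, static key/value reuse nor attention-weight resetting is requested,
+        the call is delegated to that PyTorch implementation; otherwise a pure-Python
+        path is used which additionally returns the raw attention logits.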
+ """ + if need_head_weights: + need_weights = True + + tgt_len, bsz, embed_dim = query.size() + assert embed_dim == self.embed_dim + assert list(query.size()) == [tgt_len, bsz, embed_dim] + if self.enable_torch_version and incremental_state is None and not static_kv and reset_attn_weight is None: + if self.qkv_same_dim: + return F.multi_head_attention_forward(query, key, value, + self.embed_dim, self.num_heads, + self.in_proj_weight, + self.in_proj_bias, self.bias_k, self.bias_v, + self.add_zero_attn, self.dropout, + self.out_proj.weight, self.out_proj.bias, + self.training, key_padding_mask, need_weights, + attn_mask) + else: + return F.multi_head_attention_forward(query, key, value, + self.embed_dim, self.num_heads, + torch.empty([0]), + self.in_proj_bias, self.bias_k, self.bias_v, + self.add_zero_attn, self.dropout, + self.out_proj.weight, self.out_proj.bias, + self.training, key_padding_mask, need_weights, + attn_mask, use_separate_proj_weight=True, + q_proj_weight=self.q_proj_weight, + k_proj_weight=self.k_proj_weight, + v_proj_weight=self.v_proj_weight) + + if incremental_state is not None: + saved_state = self._get_input_buffer(incremental_state) + if 'prev_key' in saved_state: + # previous time steps are cached - no need to recompute + # key and value if they are static + if static_kv: + assert self.encoder_decoder_attention and not self.self_attention + key = value = None + else: + saved_state = None + + if self.self_attention: + # self-attention + q, k, v = self.in_proj_qkv(query) + elif self.encoder_decoder_attention: + # encoder-decoder attention + q = self.in_proj_q(query) + if key is None: + assert value is None + k = v = None + else: + k = self.in_proj_k(key) + v = self.in_proj_v(key) + + else: + q = self.in_proj_q(query) + k = self.in_proj_k(key) + v = self.in_proj_v(value) + q *= self.scaling + + if self.bias_k is not None: + assert self.bias_v is not None + k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)]) + v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)]) + if attn_mask is not None: + attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1) + if key_padding_mask is not None: + key_padding_mask = torch.cat( + [key_padding_mask, key_padding_mask.new_zeros(key_padding_mask.size(0), 1)], dim=1) + + q = q.contiguous().view(tgt_len, bsz * self.num_heads, self.head_dim).transpose(0, 1) + if k is not None: + k = k.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1) + if v is not None: + v = v.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1) + + if saved_state is not None: + # saved states are stored with shape (bsz, num_heads, seq_len, head_dim) + if 'prev_key' in saved_state: + prev_key = saved_state['prev_key'].view(bsz * self.num_heads, -1, self.head_dim) + if static_kv: + k = prev_key + else: + k = torch.cat((prev_key, k), dim=1) + if 'prev_value' in saved_state: + prev_value = saved_state['prev_value'].view(bsz * self.num_heads, -1, self.head_dim) + if static_kv: + v = prev_value + else: + v = torch.cat((prev_value, v), dim=1) + if 'prev_key_padding_mask' in saved_state and saved_state['prev_key_padding_mask'] is not None: + prev_key_padding_mask = saved_state['prev_key_padding_mask'] + if static_kv: + key_padding_mask = prev_key_padding_mask + else: + key_padding_mask = torch.cat((prev_key_padding_mask, key_padding_mask), dim=1) + + saved_state['prev_key'] = k.view(bsz, self.num_heads, -1, self.head_dim) + saved_state['prev_value'] = v.view(bsz, self.num_heads, -1, self.head_dim) + 
saved_state['prev_key_padding_mask'] = key_padding_mask + + self._set_input_buffer(incremental_state, saved_state) + + src_len = k.size(1) + + # This is part of a workaround to get around fork/join parallelism + # not supporting Optional types. + if key_padding_mask is not None and key_padding_mask.shape == torch.Size([]): + key_padding_mask = None + + if key_padding_mask is not None: + assert key_padding_mask.size(0) == bsz + assert key_padding_mask.size(1) == src_len + + if self.add_zero_attn: + src_len += 1 + k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1) + v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1) + if attn_mask is not None: + attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1) + if key_padding_mask is not None: + key_padding_mask = torch.cat( + [key_padding_mask, torch.zeros(key_padding_mask.size(0), 1).type_as(key_padding_mask)], dim=1) + + attn_weights = torch.bmm(q, k.transpose(1, 2)) + attn_weights = self.apply_sparse_mask(attn_weights, tgt_len, src_len, bsz) + + assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len] + + if attn_mask is not None: + if len(attn_mask.shape) == 2: + attn_mask = attn_mask.unsqueeze(0) + elif len(attn_mask.shape) == 3: + attn_mask = attn_mask[:, None].repeat([1, self.num_heads, 1, 1]).reshape( + bsz * self.num_heads, tgt_len, src_len) + attn_weights = attn_weights + attn_mask + + if enc_dec_attn_constraint_mask is not None: # bs x head x L_kv + attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attn_weights = attn_weights.masked_fill( + enc_dec_attn_constraint_mask.unsqueeze(2).bool(), + -1e8, + ) + attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) + + if key_padding_mask is not None: + # don't attend to padding symbols + attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attn_weights = attn_weights.masked_fill( + key_padding_mask.unsqueeze(1).unsqueeze(2), + -1e8, + ) + attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) + + attn_logits = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + + if before_softmax: + return attn_weights, v + + attn_weights_float = softmax(attn_weights, dim=-1) + attn_weights = attn_weights_float.type_as(attn_weights) + attn_probs = F.dropout(attn_weights_float.type_as(attn_weights), p=self.dropout, training=self.training) + + if reset_attn_weight is not None: + if reset_attn_weight: + self.last_attn_probs = attn_probs.detach() + else: + assert self.last_attn_probs is not None + attn_probs = self.last_attn_probs + attn = torch.bmm(attn_probs, v) + assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim] + attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim) + attn = self.out_proj(attn) + + if need_weights: + attn_weights = attn_weights_float.view(bsz, self.num_heads, tgt_len, src_len).transpose(1, 0) + if not need_head_weights: + # average attention weights over heads + attn_weights = attn_weights.mean(dim=0) + else: + attn_weights = None + + return attn, (attn_weights, attn_logits) + + def in_proj_qkv(self, query): + return self._in_proj(query).chunk(3, dim=-1) + + def in_proj_q(self, query): + if self.qkv_same_dim: + return self._in_proj(query, end=self.embed_dim) + else: + bias = self.in_proj_bias + if bias is not None: + bias = bias[:self.embed_dim] + return F.linear(query, self.q_proj_weight, bias) + + def in_proj_k(self, key): + if self.qkv_same_dim: + return self._in_proj(key, 
start=self.embed_dim, end=2 * self.embed_dim) + else: + weight = self.k_proj_weight + bias = self.in_proj_bias + if bias is not None: + bias = bias[self.embed_dim:2 * self.embed_dim] + return F.linear(key, weight, bias) + + def in_proj_v(self, value): + if self.qkv_same_dim: + return self._in_proj(value, start=2 * self.embed_dim) + else: + weight = self.v_proj_weight + bias = self.in_proj_bias + if bias is not None: + bias = bias[2 * self.embed_dim:] + return F.linear(value, weight, bias) + + def _in_proj(self, input, start=0, end=None): + weight = self.in_proj_weight + bias = self.in_proj_bias + weight = weight[start:end, :] + if bias is not None: + bias = bias[start:end] + return F.linear(input, weight, bias) + + def _get_input_buffer(self, incremental_state): + return get_incremental_state( + self, + incremental_state, + 'attn_state', + ) or {} + + def _set_input_buffer(self, incremental_state, buffer): + set_incremental_state( + self, + incremental_state, + 'attn_state', + buffer, + ) + + def apply_sparse_mask(self, attn_weights, tgt_len, src_len, bsz): + return attn_weights + + def clear_buffer(self, incremental_state=None): + if incremental_state is not None: + saved_state = self._get_input_buffer(incremental_state) + if 'prev_key' in saved_state: + del saved_state['prev_key'] + if 'prev_value' in saved_state: + del saved_state['prev_value'] + self._set_input_buffer(incremental_state, saved_state) + + +class EncSALayer(nn.Module): + def __init__(self, c, num_heads, dropout, attention_dropout=0.1, + relu_dropout=0.1, kernel_size=9, padding='SAME', act='gelu'): + super().__init__() + self.c = c + self.dropout = dropout + self.num_heads = num_heads + if num_heads > 0: + self.layer_norm1 = LayerNorm(c) + self.self_attn = MultiheadAttention( + self.c, num_heads, self_attention=True, dropout=attention_dropout, bias=False) + self.layer_norm2 = LayerNorm(c) + self.ffn = TransformerFFNLayer( + c, 4 * c, kernel_size=kernel_size, dropout=relu_dropout, padding=padding, act=act) + + def forward(self, x, encoder_padding_mask=None, **kwargs): + layer_norm_training = kwargs.get('layer_norm_training', None) + if layer_norm_training is not None: + self.layer_norm1.training = layer_norm_training + self.layer_norm2.training = layer_norm_training + if self.num_heads > 0: + residual = x + x = self.layer_norm1(x) + x, _, = self.self_attn( + query=x, + key=x, + value=x, + key_padding_mask=encoder_padding_mask + ) + x = F.dropout(x, self.dropout, training=self.training) + x = residual + x + x = x * (1 - encoder_padding_mask.float()).transpose(0, 1)[..., None] + + residual = x + x = self.layer_norm2(x) + x = self.ffn(x) + x = F.dropout(x, self.dropout, training=self.training) + x = residual + x + x = x * (1 - encoder_padding_mask.float()).transpose(0, 1)[..., None] + return x + + +class DecSALayer(nn.Module): + def __init__(self, c, num_heads, dropout, attention_dropout=0.1, relu_dropout=0.1, + kernel_size=9, act='gelu'): + super().__init__() + self.c = c + self.dropout = dropout + self.layer_norm1 = LayerNorm(c) + self.self_attn = MultiheadAttention( + c, num_heads, self_attention=True, dropout=attention_dropout, bias=False + ) + self.layer_norm2 = LayerNorm(c) + self.encoder_attn = MultiheadAttention( + c, num_heads, encoder_decoder_attention=True, dropout=attention_dropout, bias=False, + ) + self.layer_norm3 = LayerNorm(c) + self.ffn = TransformerFFNLayer( + c, 4 * c, padding='LEFT', kernel_size=kernel_size, dropout=relu_dropout, act=act) + + def forward( + self, + x, + encoder_out=None, + 
encoder_padding_mask=None, + incremental_state=None, + self_attn_mask=None, + self_attn_padding_mask=None, + attn_out=None, + reset_attn_weight=None, + **kwargs, + ): + layer_norm_training = kwargs.get('layer_norm_training', None) + if layer_norm_training is not None: + self.layer_norm1.training = layer_norm_training + self.layer_norm2.training = layer_norm_training + self.layer_norm3.training = layer_norm_training + residual = x + x = self.layer_norm1(x) + x, _ = self.self_attn( + query=x, + key=x, + value=x, + key_padding_mask=self_attn_padding_mask, + incremental_state=incremental_state, + attn_mask=self_attn_mask + ) + x = F.dropout(x, self.dropout, training=self.training) + x = residual + x + + attn_logits = None + if encoder_out is not None or attn_out is not None: + residual = x + x = self.layer_norm2(x) + if encoder_out is not None: + x, attn = self.encoder_attn( + query=x, + key=encoder_out, + value=encoder_out, + key_padding_mask=encoder_padding_mask, + incremental_state=incremental_state, + static_kv=True, + enc_dec_attn_constraint_mask=get_incremental_state(self, incremental_state, + 'enc_dec_attn_constraint_mask'), + reset_attn_weight=reset_attn_weight + ) + attn_logits = attn[1] + elif attn_out is not None: + x = self.encoder_attn.in_proj_v(attn_out) + if encoder_out is not None or attn_out is not None: + x = F.dropout(x, self.dropout, training=self.training) + x = residual + x + + residual = x + x = self.layer_norm3(x) + x = self.ffn(x, incremental_state=incremental_state) + x = F.dropout(x, self.dropout, training=self.training) + x = residual + x + return x, attn_logits + + def clear_buffer(self, input, encoder_out=None, encoder_padding_mask=None, incremental_state=None): + self.encoder_attn.clear_buffer(incremental_state) + self.ffn.clear_buffer(incremental_state) + + def set_buffer(self, name, tensor, incremental_state): + return set_incremental_state(self, incremental_state, name, tensor) + + +class TransformerEncoderLayer(nn.Module): + def __init__(self, hidden_size, dropout, kernel_size=9, num_heads=2): + super().__init__() + self.hidden_size = hidden_size + self.dropout = dropout + self.num_heads = num_heads + self.op = EncSALayer( + hidden_size, num_heads, dropout=dropout, + attention_dropout=0.0, relu_dropout=dropout, + kernel_size=kernel_size) + + def forward(self, x, **kwargs): + return self.op(x, **kwargs) + + +class TransformerDecoderLayer(nn.Module): + def __init__(self, hidden_size, dropout, kernel_size=9, num_heads=2): + super().__init__() + self.hidden_size = hidden_size + self.dropout = dropout + self.num_heads = num_heads + self.op = DecSALayer( + hidden_size, num_heads, dropout=dropout, + attention_dropout=0.0, relu_dropout=dropout, + kernel_size=kernel_size) + + def forward(self, x, **kwargs): + return self.op(x, **kwargs) + + def clear_buffer(self, *args): + return self.op.clear_buffer(*args) + + def set_buffer(self, *args): + return self.op.set_buffer(*args) + + +class FFTBlocks(nn.Module): + def __init__(self, hidden_size, num_layers, ffn_kernel_size=9, dropout=0.0, + num_heads=2, use_pos_embed=True, use_last_norm=True, + use_pos_embed_alpha=True): + super().__init__() + self.num_layers = num_layers + embed_dim = self.hidden_size = hidden_size + self.dropout = dropout + self.use_pos_embed = use_pos_embed + self.use_last_norm = use_last_norm + if use_pos_embed: + self.max_source_positions = DEFAULT_MAX_TARGET_POSITIONS + self.padding_idx = 0 + self.pos_embed_alpha = nn.Parameter(torch.Tensor([1])) if use_pos_embed_alpha else 1 + self.embed_positions = 
SinusoidalPositionalEmbedding( + embed_dim, self.padding_idx, init_size=DEFAULT_MAX_TARGET_POSITIONS, + ) + + self.layers = nn.ModuleList([]) + self.layers.extend([ + TransformerEncoderLayer(self.hidden_size, self.dropout, + kernel_size=ffn_kernel_size, num_heads=num_heads) + for _ in range(self.num_layers) + ]) + if self.use_last_norm: + self.layer_norm = nn.LayerNorm(embed_dim) + else: + self.layer_norm = None + + def forward(self, x, padding_mask=None, attn_mask=None, return_hiddens=False): + """ + :param x: [B, T, C] + :param padding_mask: [B, T] + :return: [B, T, C] or [L, B, T, C] + """ + padding_mask = x.abs().sum(-1).eq(0).data if padding_mask is None else padding_mask + nonpadding_mask_TB = 1 - padding_mask.transpose(0, 1).float()[:, :, None] # [T, B, 1] + if self.use_pos_embed: + positions = self.pos_embed_alpha * self.embed_positions(x[..., 0]) + x = x + positions + x = F.dropout(x, p=self.dropout, training=self.training) + # B x T x C -> T x B x C + x = x.transpose(0, 1) * nonpadding_mask_TB + hiddens = [] + for layer in self.layers: + x = layer(x, encoder_padding_mask=padding_mask, attn_mask=attn_mask) * nonpadding_mask_TB + hiddens.append(x) + if self.use_last_norm: + x = self.layer_norm(x) * nonpadding_mask_TB + if return_hiddens: + x = torch.stack(hiddens, 0) # [L, T, B, C] + x = x.transpose(1, 2) # [L, B, T, C] + else: + x = x.transpose(0, 1) # [B, T, C] + return x + + +class FastSpeechEncoder(FFTBlocks): + def __init__(self, dict_size, hidden_size=256, num_layers=4, kernel_size=9, num_heads=2, + dropout=0.0): + super().__init__(hidden_size, num_layers, kernel_size, num_heads=num_heads, + use_pos_embed=False, dropout=dropout) # use_pos_embed_alpha for compatibility + self.embed_tokens = Embedding(dict_size, hidden_size, 0) + self.embed_scale = math.sqrt(hidden_size) + self.padding_idx = 0 + self.embed_positions = SinusoidalPositionalEmbedding( + hidden_size, self.padding_idx, init_size=DEFAULT_MAX_TARGET_POSITIONS, + ) + + def forward(self, txt_tokens, attn_mask=None): + """ + + :param txt_tokens: [B, T] + :return: { + 'encoder_out': [B x T x C] + } + """ + encoder_padding_mask = txt_tokens.eq(self.padding_idx).data + x = self.forward_embedding(txt_tokens) # [B, T, H] + if self.num_layers > 0: + x = super(FastSpeechEncoder, self).forward(x, encoder_padding_mask, attn_mask=attn_mask) + return x + + def forward_embedding(self, txt_tokens): + # embed tokens and positions + x = self.embed_scale * self.embed_tokens(txt_tokens) + if self.use_pos_embed: + positions = self.embed_positions(txt_tokens) + x = x + positions + x = F.dropout(x, p=self.dropout, training=self.training) + return x + + +class FastSpeechDecoder(FFTBlocks): + def __init__(self, hidden_size=256, num_layers=4, kernel_size=9, num_heads=2): + super().__init__(hidden_size, num_layers, kernel_size, num_heads=num_heads) diff --git a/text_to_speech/modules/commons/wavenet.py b/text_to_speech/modules/commons/wavenet.py new file mode 100644 index 0000000000000000000000000000000000000000..7809c9b9d3331ba4fd2ffd4caae14e721e4b0732 --- /dev/null +++ b/text_to_speech/modules/commons/wavenet.py @@ -0,0 +1,97 @@ +import torch +from torch import nn + + +def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): + n_channels_int = n_channels[0] + in_act = input_a + input_b + t_act = torch.tanh(in_act[:, :n_channels_int, :]) + s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) + acts = t_act * s_act + return acts + + +class WN(torch.nn.Module): + def __init__(self, hidden_size, kernel_size, dilation_rate, n_layers, 
c_cond=0, + p_dropout=0, share_cond_layers=False, is_BTC=False): + super(WN, self).__init__() + assert (kernel_size % 2 == 1) + assert (hidden_size % 2 == 0) + self.is_BTC = is_BTC + self.hidden_size = hidden_size + self.kernel_size = kernel_size + self.dilation_rate = dilation_rate + self.n_layers = n_layers + self.gin_channels = c_cond + self.p_dropout = p_dropout + self.share_cond_layers = share_cond_layers + + self.in_layers = torch.nn.ModuleList() + self.res_skip_layers = torch.nn.ModuleList() + self.drop = nn.Dropout(p_dropout) + + if c_cond != 0 and not share_cond_layers: + cond_layer = torch.nn.Conv1d(c_cond, 2 * hidden_size * n_layers, 1) + self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight') + + for i in range(n_layers): + dilation = dilation_rate ** i + padding = int((kernel_size * dilation - dilation) / 2) + in_layer = torch.nn.Conv1d(hidden_size, 2 * hidden_size, kernel_size, + dilation=dilation, padding=padding) + in_layer = torch.nn.utils.weight_norm(in_layer, name='weight') + self.in_layers.append(in_layer) + + # last one is not necessary + if i < n_layers - 1: + res_skip_channels = 2 * hidden_size + else: + res_skip_channels = hidden_size + + res_skip_layer = torch.nn.Conv1d(hidden_size, res_skip_channels, 1) + res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight') + self.res_skip_layers.append(res_skip_layer) + + def forward(self, x, nonpadding=None, cond=None): + if self.is_BTC: + x = x.transpose(1, 2) + cond = cond.transpose(1, 2) if cond is not None else None + nonpadding = nonpadding.transpose(1, 2) if nonpadding is not None else None + if nonpadding is None: + nonpadding = 1 + output = torch.zeros_like(x) + n_channels_tensor = torch.IntTensor([self.hidden_size]) + + if cond is not None and not self.share_cond_layers: + cond = self.cond_layer(cond) + + for i in range(self.n_layers): + x_in = self.in_layers[i](x) + x_in = self.drop(x_in) + if cond is not None: + cond_offset = i * 2 * self.hidden_size + cond_l = cond[:, cond_offset:cond_offset + 2 * self.hidden_size, :] + else: + cond_l = torch.zeros_like(x_in) + + acts = fused_add_tanh_sigmoid_multiply(x_in, cond_l, n_channels_tensor) + + res_skip_acts = self.res_skip_layers[i](acts) + if i < self.n_layers - 1: + x = (x + res_skip_acts[:, :self.hidden_size, :]) * nonpadding + output = output + res_skip_acts[:, self.hidden_size:, :] + else: + output = output + res_skip_acts + output = output * nonpadding + if self.is_BTC: + output = output.transpose(1, 2) + return output + + def remove_weight_norm(self): + def remove_weight_norm(m): + try: + nn.utils.remove_weight_norm(m) + except ValueError: # this module didn't have weight norm + return + + self.apply(remove_weight_norm) diff --git a/text_to_speech/modules/tts/__pycache__/fs.cpython-38.pyc b/text_to_speech/modules/tts/__pycache__/fs.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e2dc59df6b8462a80d52885ec9af762f6a58660b Binary files /dev/null and b/text_to_speech/modules/tts/__pycache__/fs.cpython-38.pyc differ diff --git a/text_to_speech/modules/tts/commons/__pycache__/align_ops.cpython-38.pyc b/text_to_speech/modules/tts/commons/__pycache__/align_ops.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c0d6b1997f529372b757095c57a032fb9d4e5d5e Binary files /dev/null and b/text_to_speech/modules/tts/commons/__pycache__/align_ops.cpython-38.pyc differ diff --git a/text_to_speech/modules/tts/commons/align_ops.py b/text_to_speech/modules/tts/commons/align_ops.py new 
file mode 100644 index 0000000000000000000000000000000000000000..a190d63a3f3ba31f41754975569336a87c63089d --- /dev/null +++ b/text_to_speech/modules/tts/commons/align_ops.py @@ -0,0 +1,25 @@ +import torch +import torch.nn.functional as F + + +def build_word_mask(x2word, y2word): + return (x2word[:, :, None] == y2word[:, None, :]).long() + + +def mel2ph_to_mel2word(mel2ph, ph2word): + mel2word = (ph2word - 1).gather(1, (mel2ph - 1).clamp(min=0)) + 1 + mel2word = mel2word * (mel2ph > 0).long() + return mel2word + + +def clip_mel2token_to_multiple(mel2token, frames_multiple): + max_frames = mel2token.shape[1] // frames_multiple * frames_multiple + mel2token = mel2token[:, :max_frames] + return mel2token + + +def expand_states(h, mel2token): + h = F.pad(h, [0, 0, 1, 0]) + mel2token_ = mel2token[..., None].repeat([1, 1, h.shape[-1]]) + h = torch.gather(h, 1, mel2token_) # [B, T, H] + return h diff --git a/text_to_speech/modules/tts/diffspeech/net.py b/text_to_speech/modules/tts/diffspeech/net.py new file mode 100644 index 0000000000000000000000000000000000000000..764020f28add5e4ee387a9d081ab6d548fc0f201 --- /dev/null +++ b/text_to_speech/modules/tts/diffspeech/net.py @@ -0,0 +1,110 @@ +import math + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from math import sqrt + +Linear = nn.Linear +ConvTranspose2d = nn.ConvTranspose2d + + +class Mish(nn.Module): + def forward(self, x): + return x * torch.tanh(F.softplus(x)) + + +class SinusoidalPosEmb(nn.Module): + def __init__(self, dim): + super().__init__() + self.dim = dim + + def forward(self, x): + device = x.device + half_dim = self.dim // 2 + emb = math.log(10000) / (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, device=device) * -emb) + emb = x[:, None] * emb[None, :] + emb = torch.cat((emb.sin(), emb.cos()), dim=-1) + return emb + + +def Conv1d(*args, **kwargs): + layer = nn.Conv1d(*args, **kwargs) + nn.init.kaiming_normal_(layer.weight) + return layer + + +class ResidualBlock(nn.Module): + def __init__(self, encoder_hidden, residual_channels, dilation): + super().__init__() + self.dilated_conv = Conv1d(residual_channels, 2 * residual_channels, 3, padding=dilation, dilation=dilation) + self.diffusion_projection = Linear(residual_channels, residual_channels) + self.conditioner_projection = Conv1d(encoder_hidden, 2 * residual_channels, 1) + self.output_projection = Conv1d(residual_channels, 2 * residual_channels, 1) + + def forward(self, x, conditioner, diffusion_step): + diffusion_step = self.diffusion_projection(diffusion_step).unsqueeze(-1) + conditioner = self.conditioner_projection(conditioner) + y = x + diffusion_step + + y = self.dilated_conv(y) + conditioner + + gate, filter = torch.chunk(y, 2, dim=1) + y = torch.sigmoid(gate) * torch.tanh(filter) + + y = self.output_projection(y) + residual, skip = torch.chunk(y, 2, dim=1) + return (x + residual) / sqrt(2.0), skip + + +class DiffNet(nn.Module): + def __init__(self, hparams): + super().__init__() + in_dims = hparams['audio_num_mel_bins'] + self.encoder_hidden = hparams['hidden_size'] + self.residual_layers = hparams['residual_layers'] + self.residual_channels = hparams['residual_channels'] + self.dilation_cycle_length = hparams['dilation_cycle_length'] + + self.input_projection = Conv1d(in_dims, self.residual_channels, 1) + self.diffusion_embedding = SinusoidalPosEmb(self.residual_channels) + dim = self.residual_channels + self.mlp = nn.Sequential( + nn.Linear(dim, dim * 4), + Mish(), + nn.Linear(dim * 4, dim) + ) + self.residual_layers = nn.ModuleList([ 
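+            # each block uses dilation 2 ** (i % dilation_cycle_length), so dilations
+            # cycle 1, 2, 4, ... across the residual stack (DiffWave-style)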
+ ResidualBlock(self.encoder_hidden, self.residual_channels, 2 ** (i % self.dilation_cycle_length)) + for i in range(self.residual_layers) + ]) + self.skip_projection = Conv1d(self.residual_channels, self.residual_channels, 1) + self.output_projection = Conv1d(self.residual_channels, in_dims, 1) + nn.init.zeros_(self.output_projection.weight) + + def forward(self, spec, diffusion_step, cond): + """ + + :param spec: [B, 1, M, T] + :param diffusion_step: [B, 1] + :param cond: [B, M, T] + :return: + """ + x = spec[:, 0] + x = self.input_projection(x) # x [B, residual_channel, T] + + x = F.relu(x) + diffusion_step = self.diffusion_embedding(diffusion_step) + diffusion_step = self.mlp(diffusion_step) + skip = [] + for layer_id, layer in enumerate(self.residual_layers): + x, skip_connection = layer(x, cond, diffusion_step) + skip.append(skip_connection) + + x = torch.sum(torch.stack(skip), dim=0) / sqrt(len(self.residual_layers)) + x = self.skip_projection(x) + x = F.relu(x) + x = self.output_projection(x) # [B, 80, T] + return x[:, None, :, :] diff --git a/text_to_speech/modules/tts/diffspeech/shallow_diffusion_tts.py b/text_to_speech/modules/tts/diffspeech/shallow_diffusion_tts.py new file mode 100644 index 0000000000000000000000000000000000000000..6901a9700aa3b274b78da6105ee33efb5df5a692 --- /dev/null +++ b/text_to_speech/modules/tts/diffspeech/shallow_diffusion_tts.py @@ -0,0 +1,279 @@ +import math +import random +from functools import partial +from inspect import isfunction +import numpy as np +import torch +import torch.nn.functional as F +from torch import nn +from tqdm import tqdm + +from text_to_speech.modules.tts.fs2_orig import FastSpeech2Orig +from text_to_speech.modules.tts.diffspeech.net import DiffNet +from text_to_speech.modules.tts.commons.align_ops import expand_states + + +def exists(x): + return x is not None + + +def default(val, d): + if exists(val): + return val + return d() if isfunction(d) else d + + +# gaussian diffusion trainer class + +def extract(a, t, x_shape): + b, *_ = t.shape + out = a.gather(-1, t) + return out.reshape(b, *((1,) * (len(x_shape) - 1))) + + +def noise_like(shape, device, repeat=False): + repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1))) + noise = lambda: torch.randn(shape, device=device) + return repeat_noise() if repeat else noise() + + +def linear_beta_schedule(timesteps, max_beta=0.01): + """ + linear schedule + """ + betas = np.linspace(1e-4, max_beta, timesteps) + return betas + + +def cosine_beta_schedule(timesteps, s=0.008): + """ + cosine schedule + as proposed in https://openreview.net/forum?id=-NEXDKk8gZ + """ + steps = timesteps + 1 + x = np.linspace(0, steps, steps) + alphas_cumprod = np.cos(((x / steps) + s) / (1 + s) * np.pi * 0.5) ** 2 + alphas_cumprod = alphas_cumprod / alphas_cumprod[0] + betas = 1 - (alphas_cumprod[1:] / alphas_cumprod[:-1]) + return np.clip(betas, a_min=0, a_max=0.999) + + +beta_schedule = { + "cosine": cosine_beta_schedule, + "linear": linear_beta_schedule, +} + + +DIFF_DECODERS = { + 'wavenet': lambda hp: DiffNet(hp), +} + + +class AuxModel(FastSpeech2Orig): + def forward(self, txt_tokens, mel2ph=None, spk_embed=None, spk_id=None, + f0=None, uv=None, energy=None, infer=False, **kwargs): + ret = {} + encoder_out = self.encoder(txt_tokens) # [B, T, C] + src_nonpadding = (txt_tokens > 0).float()[:, :, None] + style_embed = self.forward_style_embed(spk_embed, spk_id) + + # add dur + dur_inp = (encoder_out + style_embed) * src_nonpadding + mel2ph = 
self.forward_dur(dur_inp, mel2ph, txt_tokens, ret) + tgt_nonpadding = (mel2ph > 0).float()[:, :, None] + decoder_inp = decoder_inp_ = expand_states(encoder_out, mel2ph) + + # add pitch and energy embed + if self.hparams['use_pitch_embed']: + pitch_inp = (decoder_inp_ + style_embed) * tgt_nonpadding + decoder_inp = decoder_inp + self.forward_pitch(pitch_inp, f0, uv, mel2ph, ret, encoder_out) + + # add pitch and energy embed + if self.hparams['use_energy_embed']: + energy_inp = (decoder_inp_ + style_embed) * tgt_nonpadding + decoder_inp = decoder_inp + self.forward_energy(energy_inp, energy, ret) + + # decoder input + ret['decoder_inp'] = decoder_inp = (decoder_inp + style_embed) * tgt_nonpadding + if self.hparams['dec_inp_add_noise']: + B, T, _ = decoder_inp.shape + z = kwargs.get('adv_z', torch.randn([B, T, self.z_channels])).to(decoder_inp.device) + ret['adv_z'] = z + decoder_inp = torch.cat([decoder_inp, z], -1) + decoder_inp = self.dec_inp_noise_proj(decoder_inp) * tgt_nonpadding + if kwargs['skip_decoder']: + return ret + ret['mel_out'] = self.forward_decoder(decoder_inp, tgt_nonpadding, ret, infer=infer, **kwargs) + return ret + + +class GaussianDiffusion(nn.Module): + def __init__(self, dict_size, hparams, out_dims=None): + super().__init__() + self.hparams = hparams + out_dims = hparams['audio_num_mel_bins'] + denoise_fn = DIFF_DECODERS[hparams['diff_decoder_type']](hparams) + timesteps = hparams['timesteps'] + K_step = hparams['K_step'] + loss_type = hparams['diff_loss_type'] + spec_min = hparams['spec_min'] + spec_max = hparams['spec_max'] + + self.denoise_fn = denoise_fn + self.fs2 = AuxModel(dict_size, hparams) + self.mel_bins = out_dims + + if hparams['schedule_type'] == 'linear': + betas = linear_beta_schedule(timesteps, hparams['max_beta']) + else: + betas = cosine_beta_schedule(timesteps) + + alphas = 1. - betas + alphas_cumprod = np.cumprod(alphas, axis=0) + alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) + + timesteps, = betas.shape + self.num_timesteps = int(timesteps) + self.K_step = K_step + self.loss_type = loss_type + + to_torch = partial(torch.tensor, dtype=torch.float32) + + self.register_buffer('betas', to_torch(betas)) + self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) + self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) + + # calculations for diffusion q(x_t | x_{t-1}) and others + self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) + self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) + self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) + self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) + self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) + + # calculations for posterior q(x_{t-1} | x_t, x_0) + posterior_variance = betas * (1. - alphas_cumprod_prev) / (1. - alphas_cumprod) + # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) + self.register_buffer('posterior_variance', to_torch(posterior_variance)) + # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain + self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) + self.register_buffer('posterior_mean_coef1', to_torch( + betas * np.sqrt(alphas_cumprod_prev) / (1. 
- alphas_cumprod))) + self.register_buffer('posterior_mean_coef2', to_torch( + (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) + + self.register_buffer('spec_min', torch.FloatTensor(spec_min)[None, None, :hparams['keep_bins']]) + self.register_buffer('spec_max', torch.FloatTensor(spec_max)[None, None, :hparams['keep_bins']]) + + def q_mean_variance(self, x_start, t): + mean = extract(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + variance = extract(1. - self.alphas_cumprod, t, x_start.shape) + log_variance = extract(self.log_one_minus_alphas_cumprod, t, x_start.shape) + return mean, variance, log_variance + + def predict_start_from_noise(self, x_t, t, noise): + return ( + extract(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - + extract(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise + ) + + def q_posterior(self, x_start, x_t, t): + posterior_mean = ( + extract(self.posterior_mean_coef1, t, x_t.shape) * x_start + + extract(self.posterior_mean_coef2, t, x_t.shape) * x_t + ) + posterior_variance = extract(self.posterior_variance, t, x_t.shape) + posterior_log_variance_clipped = extract(self.posterior_log_variance_clipped, t, x_t.shape) + return posterior_mean, posterior_variance, posterior_log_variance_clipped + + def p_mean_variance(self, x, t, cond, clip_denoised: bool): + noise_pred = self.denoise_fn(x, t, cond=cond) + x_recon = self.predict_start_from_noise(x, t=t, noise=noise_pred) + + if clip_denoised: + x_recon.clamp_(-1., 1.) + + model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) + return model_mean, posterior_variance, posterior_log_variance + + @torch.no_grad() + def p_sample(self, x, t, cond, clip_denoised=True, repeat_noise=False): + b, *_, device = *x.shape, x.device + model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, cond=cond, clip_denoised=clip_denoised) + noise = noise_like(x.shape, device, repeat_noise) + # no noise when t == 0 + nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) + return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise + + def q_sample(self, x_start, t, noise=None): + noise = default(noise, lambda: torch.randn_like(x_start)) + return ( + extract(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + + extract(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise + ) + + def p_losses(self, x_start, t, cond, noise=None, nonpadding=None): + noise = default(noise, lambda: torch.randn_like(x_start)) + + x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) + x_recon = self.denoise_fn(x_noisy, t, cond) + + if self.loss_type == 'l1': + if nonpadding is not None: + loss = ((noise - x_recon).abs() * nonpadding.unsqueeze(1)).mean() + else: + # print('are you sure w/o nonpadding?') + loss = (noise - x_recon).abs().mean() + + elif self.loss_type == 'l2': + loss = F.mse_loss(noise, x_recon) + else: + raise NotImplementedError() + + return loss + + def forward(self, txt_tokens, mel2ph=None, spk_embed=None, spk_id=None, + ref_mels=None, f0=None, uv=None, energy=None, infer=False, **kwargs): + b, *_, device = *txt_tokens.shape, txt_tokens.device + ret = self.fs2(txt_tokens, mel2ph=mel2ph, spk_embed=spk_embed, spk_id=spk_id, + f0=f0, uv=uv, energy=energy, infer=infer, skip_decoder=(not infer), **kwargs) + cond = ret['decoder_inp'].transpose(1, 2) + + if not infer: + t = torch.randint(0, self.K_step, (b,), device=device).long() + x = ref_mels + x = self.norm_spec(x) + x = x.transpose(1, 2)[:, None, 
:, :] # [B, 1, M, T] + ret['diff_loss'] = self.p_losses(x, t, cond) + # nonpadding = (mel2ph != 0).float() + # ret['diff_loss'] = self.p_losses(x, t, cond, nonpadding=nonpadding) + ret['mel_out'] = None + else: + ret['fs2_mel'] = ret['mel_out'] + fs2_mels = ret['mel_out'] + t = self.K_step + fs2_mels = self.norm_spec(fs2_mels) + fs2_mels = fs2_mels.transpose(1, 2)[:, None, :, :] + + x = self.q_sample(x_start=fs2_mels, t=torch.tensor([t - 1], device=device).long()) + if self.hparams.get('gaussian_start') is not None and self.hparams['gaussian_start']: + print('===> gaussian start.') + shape = (cond.shape[0], 1, self.mel_bins, cond.shape[2]) + x = torch.randn(shape, device=device) + for i in tqdm(reversed(range(0, t)), desc='sample time step', total=t): + x = self.p_sample(x, torch.full((b,), i, device=device, dtype=torch.long), cond) + x = x[:, 0].transpose(1, 2) + ret['mel_out'] = self.denorm_spec(x) + + return ret + + def norm_spec(self, x): + return (x - self.spec_min) / (self.spec_max - self.spec_min) * 2 - 1 + + def denorm_spec(self, x): + return (x + 1) / 2 * (self.spec_max - self.spec_min) + self.spec_min + + def cwt2f0_norm(self, cwt_spec, mean, std, mel2ph): + return self.fs2.cwt2f0_norm(cwt_spec, mean, std, mel2ph) + + def out2mel(self, x): + return x \ No newline at end of file diff --git a/text_to_speech/modules/tts/fs.py b/text_to_speech/modules/tts/fs.py new file mode 100644 index 0000000000000000000000000000000000000000..477cae5e1b3760e80ceacfc6b09e7f6d6a3a61ca --- /dev/null +++ b/text_to_speech/modules/tts/fs.py @@ -0,0 +1,183 @@ +from copy import deepcopy + +import torch +from torch import nn +import torch.nn.functional as F +from text_to_speech.modules.commons.conv import TextConvEncoder, ConvBlocks +from text_to_speech.modules.commons.layers import Embedding +from text_to_speech.modules.commons.nar_tts_modules import PitchPredictor, DurationPredictor, LengthRegulator +from text_to_speech.modules.commons.rel_transformer import RelTransformerEncoder, BERTRelTransformerEncoder +from text_to_speech.modules.commons.rnn import TacotronEncoder, RNNEncoder, DecoderRNN +from text_to_speech.modules.commons.transformer import FastSpeechEncoder, FastSpeechDecoder +from text_to_speech.modules.commons.wavenet import WN +from text_to_speech.modules.tts.commons.align_ops import clip_mel2token_to_multiple, expand_states +from text_to_speech.utils.audio.pitch.utils import denorm_f0, f0_to_coarse + +FS_ENCODERS = { + 'fft': lambda hp, dict_size: FastSpeechEncoder( + dict_size, hp['hidden_size'], hp['enc_layers'], hp['enc_ffn_kernel_size'], + num_heads=hp['num_heads']), + 'tacotron': lambda hp, dict_size: TacotronEncoder( + hp['hidden_size'], dict_size, hp['hidden_size'], + K=hp['encoder_K'], num_highways=4, dropout=hp['dropout']), + 'tacotron2': lambda hp, dict_size: RNNEncoder(dict_size, hp['hidden_size']), + 'conv': lambda hp, dict_size: TextConvEncoder(dict_size, hp['hidden_size'], hp['hidden_size'], + hp['enc_dilations'], hp['enc_kernel_size'], + layers_in_block=hp['layers_in_block'], + norm_type=hp['enc_dec_norm'], + post_net_kernel=hp.get('enc_post_net_kernel', 3)), + 'rel_fft': lambda hp, dict_size: RelTransformerEncoder( + dict_size, hp['hidden_size'], hp['hidden_size'], + hp['ffn_hidden_size'], hp['num_heads'], hp['enc_layers'], + hp['enc_ffn_kernel_size'], hp['dropout'], prenet=hp['enc_prenet'], pre_ln=hp['enc_pre_ln']), +} + +FS_DECODERS = { + 'fft': lambda hp: FastSpeechDecoder( + hp['hidden_size'], hp['dec_layers'], hp['dec_ffn_kernel_size'], hp['num_heads']), + 'rnn': lambda 
hp: DecoderRNN(hp['hidden_size'], hp['decoder_rnn_dim'], hp['dropout']), + 'conv': lambda hp: ConvBlocks(hp['hidden_size'], hp['hidden_size'], hp['dec_dilations'], + hp['dec_kernel_size'], layers_in_block=hp['layers_in_block'], + norm_type=hp['enc_dec_norm'], dropout=hp['dropout'], + post_net_kernel=hp.get('dec_post_net_kernel', 3)), + 'wn': lambda hp: WN(hp['hidden_size'], kernel_size=5, dilation_rate=1, n_layers=hp['dec_layers'], + is_BTC=True), +} + + +class FastSpeech(nn.Module): + def __init__(self, dict_size, hparams, out_dims=None): + super().__init__() + self.hparams = deepcopy(hparams) + self.enc_layers = hparams['enc_layers'] + self.dec_layers = hparams['dec_layers'] + self.hidden_size = hparams['hidden_size'] + if hparams.get("use_bert") is True: + self.ph_encoder = BERTRelTransformerEncoder(dict_size, hparams['hidden_size'], hparams['hidden_size'], + hparams['ffn_hidden_size'], hparams['num_heads'], hparams['enc_layers'], + hparams['enc_ffn_kernel_size'], hparams['dropout'], prenet=hparams['enc_prenet'], pre_ln=hparams['enc_pre_ln']) + else: + self.ph_encoder = FS_ENCODERS[hparams['encoder_type']](hparams, dict_size) + self.decoder = FS_DECODERS[hparams['decoder_type']](hparams) + self.out_dims = hparams['audio_num_mel_bins'] if out_dims is None else out_dims + self.mel_out = nn.Linear(self.hidden_size, self.out_dims, bias=True) + if hparams['use_spk_id']: + self.spk_id_proj = Embedding(hparams['num_spk'], self.hidden_size) + if hparams['use_spk_embed']: + self.spk_embed_proj = nn.Linear(256, self.hidden_size, bias=True) + predictor_hidden = hparams['predictor_hidden'] if hparams['predictor_hidden'] > 0 else self.hidden_size + self.dur_predictor = DurationPredictor( + self.hidden_size, + n_chans=predictor_hidden, + n_layers=hparams['dur_predictor_layers'], + dropout_rate=hparams['predictor_dropout'], + kernel_size=hparams['dur_predictor_kernel']) + self.length_regulator = LengthRegulator() + if hparams['use_pitch_embed']: + self.pitch_embed = Embedding(300, self.hidden_size, 0) + self.pitch_predictor = PitchPredictor( + self.hidden_size, n_chans=predictor_hidden, + n_layers=5, dropout_rate=0.1, odim=2, + kernel_size=hparams['predictor_kernel']) + if hparams['dec_inp_add_noise']: + self.z_channels = hparams['z_channels'] + self.dec_inp_noise_proj = nn.Linear(self.hidden_size + self.z_channels, self.hidden_size) + + def forward(self, txt_tokens, mel2ph=None, spk_embed=None, spk_id=None, + f0=None, uv=None, infer=False, **kwargs): + ret = {} + src_nonpadding = (txt_tokens > 0).float()[:, :, None] + style_embed = self.forward_style_embed(spk_embed, spk_id) + + use_bert = self.hparams.get("use_bert") is True + if use_bert: + encoder_out = self.ph_encoder(txt_tokens, bert_feats=kwargs['bert_feats'], ph2word=kwargs['ph2word'], + ret=ret) * src_nonpadding + style_embed + else: + encoder_out = self.ph_encoder(txt_tokens) * src_nonpadding + style_embed + + # add dur + dur_inp = (encoder_out + style_embed) * src_nonpadding + mel2ph = self.forward_dur(dur_inp, mel2ph, txt_tokens, ret) + tgt_nonpadding = (mel2ph > 0).float()[:, :, None] + decoder_inp = expand_states(encoder_out, mel2ph) + + # add pitch embed + if self.hparams['use_pitch_embed']: + pitch_inp = (decoder_inp + style_embed) * tgt_nonpadding + decoder_inp = decoder_inp + self.forward_pitch(pitch_inp, f0, uv, mel2ph, ret, encoder_out) + + # decoder input + ret['decoder_inp'] = decoder_inp = (decoder_inp + style_embed) * tgt_nonpadding + if self.hparams['dec_inp_add_noise']: + B, T, _ = decoder_inp.shape + z = kwargs.get('adv_z', 
torch.randn([B, T, self.z_channels])).to(decoder_inp.device) + ret['adv_z'] = z + decoder_inp = torch.cat([decoder_inp, z], -1) + decoder_inp = self.dec_inp_noise_proj(decoder_inp) * tgt_nonpadding + ret['mel_out'] = self.forward_decoder(decoder_inp, tgt_nonpadding, ret, infer=infer, **kwargs) + return ret + + def forward_style_embed(self, spk_embed=None, spk_id=None): + # add spk embed + style_embed = 0 + if self.hparams['use_spk_embed']: + style_embed = style_embed + self.spk_embed_proj(spk_embed)[:, None, :] + if self.hparams['use_spk_id']: + style_embed = style_embed + self.spk_id_proj(spk_id)[:, None, :] + return style_embed + + def forward_dur(self, dur_input, mel2ph, txt_tokens, ret): + """ + + :param dur_input: [B, T_txt, H] + :param mel2ph: [B, T_mel] + :param txt_tokens: [B, T_txt] + :param ret: + :return: + """ + src_padding = txt_tokens == 0 + if self.hparams['predictor_grad'] != 1: + dur_input = dur_input.detach() + self.hparams['predictor_grad'] * (dur_input - dur_input.detach()) + dur = self.dur_predictor(dur_input, src_padding) + ret['dur'] = dur + if mel2ph is None: + mel2ph = self.length_regulator(dur, src_padding).detach() + ret['mel2ph'] = mel2ph = clip_mel2token_to_multiple(mel2ph, self.hparams['frames_multiple']) + return mel2ph + + def forward_pitch(self, decoder_inp, f0, uv, mel2ph, ret, encoder_out=None): + if self.hparams['pitch_type'] == 'frame': + pitch_pred_inp = decoder_inp + pitch_padding = mel2ph == 0 + else: + pitch_pred_inp = encoder_out + pitch_padding = encoder_out.abs().sum(-1) == 0 + uv = None + if self.hparams['predictor_grad'] != 1: + pitch_pred_inp = pitch_pred_inp.detach() + \ + self.hparams['predictor_grad'] * (pitch_pred_inp - pitch_pred_inp.detach()) + ret['pitch_pred'] = pitch_pred = self.pitch_predictor(pitch_pred_inp) + use_uv = self.hparams['pitch_type'] == 'frame' and self.hparams['use_uv'] + if f0 is None: + f0 = pitch_pred[:, :, 0] + if use_uv: + uv = pitch_pred[:, :, 1] > 0 + f0_denorm = denorm_f0(f0, uv if use_uv else None, pitch_padding=pitch_padding) + pitch = f0_to_coarse(f0_denorm) # start from 0 [B, T_txt] + ret['f0_denorm'] = f0_denorm + ret['f0_denorm_pred'] = denorm_f0( + pitch_pred[:, :, 0], (pitch_pred[:, :, 1] > 0) if use_uv else None, + pitch_padding=pitch_padding) + if self.hparams['pitch_type'] == 'ph': + pitch = torch.gather(F.pad(pitch, [1, 0]), 1, mel2ph) + ret['f0_denorm'] = torch.gather(F.pad(ret['f0_denorm'], [1, 0]), 1, mel2ph) + ret['f0_denorm_pred'] = torch.gather(F.pad(ret['f0_denorm_pred'], [1, 0]), 1, mel2ph) + pitch_embed = self.pitch_embed(pitch) + return pitch_embed + + def forward_decoder(self, decoder_inp, tgt_nonpadding, ret, infer, **kwargs): + x = decoder_inp # [B, T, H] + x = self.decoder(x) + x = self.mel_out(x) + return x * tgt_nonpadding diff --git a/text_to_speech/modules/tts/fs2_orig.py b/text_to_speech/modules/tts/fs2_orig.py new file mode 100644 index 0000000000000000000000000000000000000000..5aba7f6d99c907bd684cae41964dbf34334e27af --- /dev/null +++ b/text_to_speech/modules/tts/fs2_orig.py @@ -0,0 +1,102 @@ +import torch +from torch import nn +from text_to_speech.modules.commons.layers import Embedding +from text_to_speech.modules.commons.nar_tts_modules import EnergyPredictor, PitchPredictor +from text_to_speech.modules.tts.commons.align_ops import expand_states +from text_to_speech.modules.tts.fs import FastSpeech +from text_to_speech.utils.audio.cwt import cwt2f0, get_lf0_cwt +from text_to_speech.utils.audio.pitch.utils import denorm_f0, f0_to_coarse, norm_f0 +import numpy as np + + +class 
FastSpeech2Orig(FastSpeech): + def __init__(self, dict_size, hparams, out_dims=None): + super().__init__(dict_size, hparams, out_dims) + predictor_hidden = hparams['predictor_hidden'] if hparams['predictor_hidden'] > 0 else self.hidden_size + if hparams['use_energy_embed']: + self.energy_embed = Embedding(300, self.hidden_size, 0) + self.energy_predictor = EnergyPredictor( + self.hidden_size, n_chans=predictor_hidden, + n_layers=hparams['predictor_layers'], dropout_rate=hparams['predictor_dropout'], odim=2, + kernel_size=hparams['predictor_kernel']) + if hparams['pitch_type'] == 'cwt' and hparams['use_pitch_embed']: + self.pitch_predictor = PitchPredictor( + self.hidden_size, n_chans=predictor_hidden, + n_layers=hparams['predictor_layers'], dropout_rate=hparams['predictor_dropout'], odim=11, + kernel_size=hparams['predictor_kernel']) + self.cwt_stats_layers = nn.Sequential( + nn.Linear(self.hidden_size, self.hidden_size), nn.ReLU(), + nn.Linear(self.hidden_size, self.hidden_size), nn.ReLU(), nn.Linear(self.hidden_size, 2)) + + def forward(self, txt_tokens, mel2ph=None, spk_embed=None, spk_id=None, + f0=None, uv=None, energy=None, infer=False, **kwargs): + ret = {} + encoder_out = self.encoder(txt_tokens) # [B, T, C] + src_nonpadding = (txt_tokens > 0).float()[:, :, None] + style_embed = self.forward_style_embed(spk_embed, spk_id) + + # add dur + dur_inp = (encoder_out + style_embed) * src_nonpadding + mel2ph = self.forward_dur(dur_inp, mel2ph, txt_tokens, ret) + tgt_nonpadding = (mel2ph > 0).float()[:, :, None] + decoder_inp = decoder_inp_ = expand_states(encoder_out, mel2ph) + + # add pitch and energy embed + if self.hparams['use_pitch_embed']: + pitch_inp = (decoder_inp_ + style_embed) * tgt_nonpadding + decoder_inp = decoder_inp + self.forward_pitch(pitch_inp, f0, uv, mel2ph, ret, encoder_out) + + # add pitch and energy embed + if self.hparams['use_energy_embed']: + energy_inp = (decoder_inp_ + style_embed) * tgt_nonpadding + decoder_inp = decoder_inp + self.forward_energy(energy_inp, energy, ret) + + # decoder input + ret['decoder_inp'] = decoder_inp = (decoder_inp + style_embed) * tgt_nonpadding + if self.hparams['dec_inp_add_noise']: + B, T, _ = decoder_inp.shape + z = kwargs.get('adv_z', torch.randn([B, T, self.z_channels])).to(decoder_inp.device) + ret['adv_z'] = z + decoder_inp = torch.cat([decoder_inp, z], -1) + decoder_inp = self.dec_inp_noise_proj(decoder_inp) * tgt_nonpadding + ret['mel_out'] = self.forward_decoder(decoder_inp, tgt_nonpadding, ret, infer=infer, **kwargs) + return ret + + def forward_pitch(self, decoder_inp, f0, uv, mel2ph, ret, encoder_out=None): + if self.hparams['pitch_type'] == 'cwt': + decoder_inp = decoder_inp.detach() + self.hparams['predictor_grad'] * (decoder_inp - decoder_inp.detach()) + pitch_padding = mel2ph == 0 + ret['cwt'] = cwt_out = self.pitch_predictor(decoder_inp) + stats_out = self.cwt_stats_layers(decoder_inp.mean(1)) # [B, 2] + mean = ret['f0_mean'] = stats_out[:, 0] + std = ret['f0_std'] = stats_out[:, 1] + cwt_spec = cwt_out[:, :, :10] + if f0 is None: + std = std * self.hparams['cwt_std_scale'] + f0 = self.cwt2f0_norm(cwt_spec, mean, std, mel2ph) + if self.hparams['use_uv']: + assert cwt_out.shape[-1] == 11 + uv = cwt_out[:, :, -1] > 0 + ret['f0_denorm'] = f0_denorm = denorm_f0(f0, uv if self.hparams['use_uv'] else None, + pitch_padding=pitch_padding) + pitch = f0_to_coarse(f0_denorm) # start from 0 + pitch_embed = self.pitch_embed(pitch) + return pitch_embed + else: + return super(FastSpeech2Orig, self).forward_pitch(decoder_inp, f0, 
uv, mel2ph, ret, encoder_out) + + def forward_energy(self, decoder_inp, energy, ret): + decoder_inp = decoder_inp.detach() + self.hparams['predictor_grad'] * (decoder_inp - decoder_inp.detach()) + ret['energy_pred'] = energy_pred = self.energy_predictor(decoder_inp)[:, :, 0] + energy_embed_inp = energy_pred if energy is None else energy + energy_embed_inp = torch.clamp(energy_embed_inp * 256 // 4, min=0, max=255).long() + energy_embed = self.energy_embed(energy_embed_inp) + return energy_embed + + def cwt2f0_norm(self, cwt_spec, mean, std, mel2ph): + _, cwt_scales = get_lf0_cwt(np.ones(10)) + f0 = cwt2f0(cwt_spec, mean, std, cwt_scales) + f0 = torch.cat( + [f0] + [f0[:, -1:]] * (mel2ph.shape[1] - f0.shape[1]), 1) + f0_norm = norm_f0(f0, None) + return f0_norm diff --git a/text_to_speech/modules/tts/portaspeech/__pycache__/fvae.cpython-38.pyc b/text_to_speech/modules/tts/portaspeech/__pycache__/fvae.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..355626cc2e7a8b7835bbf79c2bbb95af96c82316 Binary files /dev/null and b/text_to_speech/modules/tts/portaspeech/__pycache__/fvae.cpython-38.pyc differ diff --git a/text_to_speech/modules/tts/portaspeech/__pycache__/portaspeech.cpython-38.pyc b/text_to_speech/modules/tts/portaspeech/__pycache__/portaspeech.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c9c268460ba7304f4545199a2d677529c5b79c2c Binary files /dev/null and b/text_to_speech/modules/tts/portaspeech/__pycache__/portaspeech.cpython-38.pyc differ diff --git a/text_to_speech/modules/tts/portaspeech/fvae.py b/text_to_speech/modules/tts/portaspeech/fvae.py new file mode 100644 index 0000000000000000000000000000000000000000..c21d78f4a3913758336dc20ef0fe9abbb31d8079 --- /dev/null +++ b/text_to_speech/modules/tts/portaspeech/fvae.py @@ -0,0 +1,203 @@ +import numpy as np +import torch +import torch.distributions as dist +from torch import nn + +from text_to_speech.modules.commons.conv import ConditionalConvBlocks +from text_to_speech.modules.commons.normalizing_flow.res_flow import ResFlow +from text_to_speech.modules.commons.wavenet import WN +# from text_to_speech.modules.tts.syntaspeech.syntactic_graph_encoder import GraphAuxEnc + + +class FVAEEncoder(nn.Module): + def __init__(self, c_in, hidden_size, c_latent, kernel_size, + n_layers, c_cond=0, p_dropout=0, strides=[4], nn_type='wn'): + super().__init__() + self.strides = strides + self.hidden_size = hidden_size + if np.prod(strides) == 1: + self.pre_net = nn.Conv1d(c_in, hidden_size, kernel_size=1) + else: + self.pre_net = nn.Sequential(*[ + nn.Conv1d(c_in, hidden_size, kernel_size=s * 2, stride=s, padding=s // 2) + if i == 0 else + nn.Conv1d(hidden_size, hidden_size, kernel_size=s * 2, stride=s, padding=s // 2) + for i, s in enumerate(strides) + ]) + if nn_type == 'wn': + self.nn = WN(hidden_size, kernel_size, 1, n_layers, c_cond, p_dropout) + elif nn_type == 'conv': + self.nn = ConditionalConvBlocks( + hidden_size, c_cond, hidden_size, None, kernel_size, + layers_in_block=2, is_BTC=False, num_layers=n_layers) + + self.out_proj = nn.Conv1d(hidden_size, c_latent * 2, 1) + self.latent_channels = c_latent + + def forward(self, x, nonpadding, cond): + x = self.pre_net(x) + nonpadding = nonpadding[:, :, ::np.prod(self.strides)][:, :, :x.shape[-1]] + x = x * nonpadding + x = self.nn(x, nonpadding=nonpadding, cond=cond) * nonpadding + x = self.out_proj(x) + m, logs = torch.split(x, self.latent_channels, dim=1) + z = (m + torch.randn_like(m) * torch.exp(logs)) + return z, m, logs, 
nonpadding + + +class FVAEDecoder(nn.Module): + def __init__(self, c_latent, hidden_size, out_channels, kernel_size, + n_layers, c_cond=0, p_dropout=0, strides=[4], nn_type='wn'): + super().__init__() + self.strides = strides + self.hidden_size = hidden_size + self.pre_net = nn.Sequential(*[ + nn.ConvTranspose1d(c_latent, hidden_size, kernel_size=s, stride=s) + if i == 0 else + nn.ConvTranspose1d(hidden_size, hidden_size, kernel_size=s, stride=s) + for i, s in enumerate(strides) + ]) + if nn_type == 'wn': + self.nn = WN(hidden_size, kernel_size, 1, n_layers, c_cond, p_dropout) + elif nn_type == 'conv': + self.nn = ConditionalConvBlocks( + hidden_size, c_cond, hidden_size, [1] * n_layers, kernel_size, + layers_in_block=2, is_BTC=False) + self.out_proj = nn.Conv1d(hidden_size, out_channels, 1) + + def forward(self, x, nonpadding, cond): + x = self.pre_net(x) + x = x * nonpadding + x = self.nn(x, nonpadding=nonpadding, cond=cond) * nonpadding + x = self.out_proj(x) + return x + + +class FVAE(nn.Module): + def __init__(self, + c_in_out, hidden_size, c_latent, + kernel_size, enc_n_layers, dec_n_layers, c_cond, strides, + use_prior_flow, flow_hidden=None, flow_kernel_size=None, flow_n_steps=None, + encoder_type='wn', decoder_type='wn'): + super(FVAE, self).__init__() + self.strides = strides + self.hidden_size = hidden_size + self.latent_size = c_latent + self.use_prior_flow = use_prior_flow + if np.prod(strides) == 1: + self.g_pre_net = nn.Conv1d(c_cond, c_cond, kernel_size=1) + else: + self.g_pre_net = nn.Sequential(*[ + nn.Conv1d(c_cond, c_cond, kernel_size=s * 2, stride=s, padding=s // 2) + for i, s in enumerate(strides) + ]) + self.encoder = FVAEEncoder(c_in_out, hidden_size, c_latent, kernel_size, + enc_n_layers, c_cond, strides=strides, nn_type=encoder_type) + if use_prior_flow: + self.prior_flow = ResFlow( + c_latent, flow_hidden, flow_kernel_size, flow_n_steps, 4, c_cond=c_cond) + self.decoder = FVAEDecoder(c_latent, hidden_size, c_in_out, kernel_size, + dec_n_layers, c_cond, strides=strides, nn_type=decoder_type) + self.prior_dist = dist.Normal(0, 1) + + def forward(self, x=None, nonpadding=None, cond=None, infer=False, noise_scale=1.0, **kwargs): + """ + + :param x: [B, C_in_out, T] + :param nonpadding: [B, 1, T] + :param cond: [B, C_g, T] + :return: + """ + if nonpadding is None: + nonpadding = 1 + cond_sqz = self.g_pre_net(cond) + if not infer: + z_q, m_q, logs_q, nonpadding_sqz = self.encoder(x, nonpadding, cond_sqz) + q_dist = dist.Normal(m_q, logs_q.exp()) + if self.use_prior_flow: + logqx = q_dist.log_prob(z_q) + z_p = self.prior_flow(z_q, nonpadding_sqz, cond_sqz) + logpx = self.prior_dist.log_prob(z_p) + loss_kl = ((logqx - logpx) * nonpadding_sqz).sum() / nonpadding_sqz.sum() / logqx.shape[1] + else: + loss_kl = torch.distributions.kl_divergence(q_dist, self.prior_dist) + loss_kl = (loss_kl * nonpadding_sqz).sum() / nonpadding_sqz.sum() / z_q.shape[1] + z_p = None + return z_q, loss_kl, z_p, m_q, logs_q + else: + latent_shape = [cond_sqz.shape[0], self.latent_size, cond_sqz.shape[2]] + z_p = torch.randn(latent_shape).to(cond.device) * noise_scale + if self.use_prior_flow: + z_p = self.prior_flow(z_p, 1, cond_sqz, reverse=True) + return z_p + + +class SyntaFVAE(nn.Module): + def __init__(self, + c_in_out, hidden_size, c_latent, + kernel_size, enc_n_layers, dec_n_layers, c_cond, strides, + use_prior_flow, flow_hidden=None, flow_kernel_size=None, flow_n_steps=None, + encoder_type='wn', decoder_type='wn'): + super(SyntaFVAE, self).__init__() + self.strides = strides + 
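# NOTE: SyntaFVAE mirrors the FVAE above, but additionally encodes a word-level syntactic graph + # with GraphAuxEnc (a GatedGraphConv-based encoder) and adds that encoding to the phoneme condition in forward(); + # the GraphAuxEnc import at the top of this file is currently commented out, so it must be restored before this class can be used. +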
self.hidden_size = hidden_size + self.latent_size = c_latent + self.use_prior_flow = use_prior_flow + if np.prod(strides) == 1: + self.g_pre_net = nn.Conv1d(c_cond, c_cond, kernel_size=1) + else: + self.g_pre_net = nn.Sequential(*[ + nn.Conv1d(c_cond, c_cond, kernel_size=s * 2, stride=s, padding=s // 2) + for i, s in enumerate(strides) + ]) + self.encoder = FVAEEncoder(c_in_out, hidden_size, c_latent, kernel_size, + enc_n_layers, c_cond, strides=strides, nn_type=encoder_type) + if use_prior_flow: + self.prior_flow = ResFlow( + c_latent, flow_hidden, flow_kernel_size, flow_n_steps, 4, c_cond=c_cond) + self.decoder = FVAEDecoder(c_latent, hidden_size, c_in_out, kernel_size, + dec_n_layers, c_cond, strides=strides, nn_type=decoder_type) + self.prior_dist = dist.Normal(0, 1) + self.graph_encoder = GraphAuxEnc(in_dim=hidden_size, hid_dim=hidden_size,out_dim=hidden_size) + + def forward(self, x=None, nonpadding=None, cond=None, infer=False, noise_scale=1.0, + mel2word=None, ph2word=None, graph_lst=None, etypes_lst=None): + """ + + :param x: target mel, [B, C_in_out, T] + :param nonpadding: [B, 1, T] + :param cond: phoneme encoding, [B, C_g, T] + :return: + """ + word_len = ph2word.max(dim=1)[0] + ph_encoding_for_graph = cond.detach() + 0.1 * (cond - cond.detach()) # only 0.1x grad can pass through + _, ph_out_word_encoding_for_graph = GraphAuxEnc.ph_encoding_to_word_encoding(ph_encoding_for_graph.transpose(1,2), mel2word, word_len) + t_m = mel2word.shape[-1] + g_graph = self.graph_encoder.word_forward(graph_lst=graph_lst, word_encoding=ph_out_word_encoding_for_graph, etypes_lst=etypes_lst) + g_graph = g_graph.transpose(1,2) + g_graph = GraphAuxEnc._postprocess_word2ph(g_graph,mel2word,t_m) + g_graph = g_graph.transpose(1,2) + cond = cond + g_graph * 1. + + if nonpadding is None: + nonpadding = 1 + cond_sqz = self.g_pre_net(cond) + if not infer: + z_q, m_q, logs_q, nonpadding_sqz = self.encoder(x, nonpadding, cond_sqz) + q_dist = dist.Normal(m_q, logs_q.exp()) + if self.use_prior_flow: + logqx = q_dist.log_prob(z_q) + z_p = self.prior_flow(z_q, nonpadding_sqz, cond_sqz) + logpx = self.prior_dist.log_prob(z_p) + loss_kl = ((logqx - logpx) * nonpadding_sqz).sum() / nonpadding_sqz.sum() / logqx.shape[1] + else: + loss_kl = torch.distributions.kl_divergence(q_dist, self.prior_dist) + loss_kl = (loss_kl * nonpadding_sqz).sum() / nonpadding_sqz.sum() / z_q.shape[1] + z_p = None + return z_q, loss_kl, z_p, m_q, logs_q + else: + latent_shape = [cond_sqz.shape[0], self.latent_size, cond_sqz.shape[2]] + z_p = torch.randn(latent_shape).to(cond.device) * noise_scale + if self.use_prior_flow: + z_p = self.prior_flow(z_p, 1, cond_sqz, reverse=True) + return z_p \ No newline at end of file diff --git a/text_to_speech/modules/tts/portaspeech/portaspeech.py b/text_to_speech/modules/tts/portaspeech/portaspeech.py new file mode 100644 index 0000000000000000000000000000000000000000..6eb0dfd23f2b65db53bfd3a3d9930a335ca90506 --- /dev/null +++ b/text_to_speech/modules/tts/portaspeech/portaspeech.py @@ -0,0 +1,233 @@ +import math +import torch +import torch.nn.functional as F +from torch import nn +from torch.nn import Linear + +from text_to_speech.modules.commons.conv import ConvBlocks, ConditionalConvBlocks +from text_to_speech.modules.commons.layers import Embedding +from text_to_speech.modules.commons.rel_transformer import RelTransformerEncoder +from text_to_speech.modules.commons.transformer import MultiheadAttention, FFTBlocks +from text_to_speech.modules.tts.commons.align_ops import clip_mel2token_to_multiple, 
build_word_mask, expand_states, mel2ph_to_mel2word +from text_to_speech.modules.tts.fs import FS_DECODERS, FastSpeech +from text_to_speech.modules.tts.portaspeech.fvae import FVAE +from text_to_speech.utils.commons.meters import Timer +from text_to_speech.utils.nn.seq_utils import group_hidden_by_segs + + +class SinusoidalPosEmb(nn.Module): + def __init__(self, dim): + super().__init__() + self.dim = dim + + def forward(self, x): + """ + + :param x: [B, T] + :return: [B, T, H] + """ + device = x.device + half_dim = self.dim // 2 + emb = math.log(10000) / (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, device=device) * -emb) + emb = x[:, :, None] * emb[None, :] + emb = torch.cat((emb.sin(), emb.cos()), dim=-1) + return emb + + +class PortaSpeech(FastSpeech): + def __init__(self, ph_dict_size, word_dict_size, hparams, out_dims=None): + self.hparams = hparams + super().__init__(ph_dict_size, hparams, out_dims) + # build linguistic encoder + if hparams['use_word_encoder']: + # default False, use independent word embedding instead of phoneme encoding to represent word + self.word_encoder = RelTransformerEncoder( + word_dict_size, self.hidden_size, self.hidden_size, self.hidden_size, 2, + hparams['word_enc_layers'], hparams['enc_ffn_kernel_size']) + if hparams['dur_level'] == 'word': + if hparams['word_encoder_type'] == 'rel_fft': + self.ph2word_encoder = RelTransformerEncoder( + 0, self.hidden_size, self.hidden_size, self.hidden_size, 2, + hparams['word_enc_layers'], hparams['enc_ffn_kernel_size']) + if hparams['word_encoder_type'] == 'fft': + self.ph2word_encoder = FFTBlocks( + self.hidden_size, hparams['word_enc_layers'], 1, num_heads=hparams['num_heads']) + self.sin_pos = SinusoidalPosEmb(self.hidden_size) + self.enc_pos_proj = nn.Linear(2 * self.hidden_size, self.hidden_size) + self.dec_query_proj = nn.Linear(2 * self.hidden_size, self.hidden_size) + self.dec_res_proj = nn.Linear(2 * self.hidden_size, self.hidden_size) + self.attn = MultiheadAttention(self.hidden_size, 1, encoder_decoder_attention=True, bias=False) + self.attn.enable_torch_version = False + if hparams['text_encoder_postnet']: + self.text_encoder_postnet = ConvBlocks( + self.hidden_size, self.hidden_size, [1] * 3, 5, layers_in_block=2) + else: + self.sin_pos = SinusoidalPosEmb(self.hidden_size) + # build VAE decoder + if hparams['use_fvae']: + del self.decoder + del self.mel_out + self.fvae = FVAE( + c_in_out=self.out_dims, + hidden_size=hparams['fvae_enc_dec_hidden'], c_latent=hparams['latent_size'], + kernel_size=hparams['fvae_kernel_size'], + enc_n_layers=hparams['fvae_enc_n_layers'], + dec_n_layers=hparams['fvae_dec_n_layers'], + c_cond=self.hidden_size, + use_prior_flow=hparams['use_prior_flow'], + flow_hidden=hparams['prior_flow_hidden'], + flow_kernel_size=hparams['prior_flow_kernel_size'], + flow_n_steps=hparams['prior_flow_n_blocks'], + strides=[hparams['fvae_strides']], + encoder_type=hparams['fvae_encoder_type'], + decoder_type=hparams['fvae_decoder_type'], + ) + else: + self.decoder = FS_DECODERS[hparams['decoder_type']](hparams) + self.mel_out = Linear(self.hidden_size, self.out_dims, bias=True) + if hparams['use_pitch_embed']: + self.pitch_embed = Embedding(300, self.hidden_size, 0) + if self.hparams['add_word_pos']: + self.word_pos_proj = Linear(self.hidden_size, self.hidden_size) + + def build_embedding(self, dictionary, embed_dim): + num_embeddings = len(dictionary) + emb = Embedding(num_embeddings, embed_dim, self.padding_idx) + return emb + + def forward(self, txt_tokens, word_tokens, ph2word, 
word_len, mel2word=None, mel2ph=None, + spk_embed=None, spk_id=None, pitch=None, infer=False, tgt_mels=None, + global_step=None, *args, **kwargs): + ret = {} + style_embed = self.forward_style_embed(spk_embed, spk_id) + x, tgt_nonpadding = self.run_text_encoder( + txt_tokens, word_tokens, ph2word, word_len, mel2word, mel2ph, style_embed, ret, **kwargs) + x = x * tgt_nonpadding + ret['nonpadding'] = tgt_nonpadding + if self.hparams['use_pitch_embed']: + x = x + self.pitch_embed(pitch) + ret['decoder_inp'] = x + ret['mel_out_fvae'] = ret['mel_out'] = self.run_decoder(x, tgt_nonpadding, ret, infer, tgt_mels, global_step) + return ret + + def run_text_encoder(self, txt_tokens, word_tokens, ph2word, word_len, mel2word, mel2ph, style_embed, ret, **kwargs): + word2word = torch.arange(word_len)[None, :].to(ph2word.device) + 1 # [B, T_mel, T_word] + src_nonpadding = (txt_tokens > 0).float()[:, :, None] + use_bert = self.hparams.get("use_bert") is True + if use_bert: + ph_encoder_out = self.ph_encoder(txt_tokens, bert_feats=kwargs['bert_feats'], ph2word=ph2word, + graph_lst=kwargs['graph_lst'], etypes_lst=kwargs['etypes_lst'], + cl_feats=kwargs['cl_feats'], ret=ret) * src_nonpadding + style_embed + else: + ph_encoder_out = self.ph_encoder(txt_tokens) * src_nonpadding + style_embed + if self.hparams['use_word_encoder']: + word_encoder_out = self.word_encoder(word_tokens) + style_embed + ph_encoder_out = ph_encoder_out + expand_states(word_encoder_out, ph2word) + if self.hparams['dur_level'] == 'word': + word_encoder_out = 0 + h_ph_gb_word = group_hidden_by_segs(ph_encoder_out, ph2word, word_len)[0] + word_encoder_out = word_encoder_out + self.ph2word_encoder(h_ph_gb_word) + if self.hparams['use_word_encoder']: + word_encoder_out = word_encoder_out + self.word_encoder(word_tokens) + mel2word = self.forward_dur(ph_encoder_out, mel2word, ret, ph2word=ph2word, word_len=word_len) + mel2word = clip_mel2token_to_multiple(mel2word, self.hparams['frames_multiple']) + tgt_nonpadding = (mel2word > 0).float()[:, :, None] + enc_pos = self.get_pos_embed(word2word, ph2word) # [B, T_ph, H] + dec_pos = self.get_pos_embed(word2word, mel2word) # [B, T_mel, H] + dec_word_mask = build_word_mask(mel2word, ph2word) # [B, T_mel, T_ph] + x, weight = self.attention(ph_encoder_out, enc_pos, word_encoder_out, dec_pos, mel2word, dec_word_mask) + if self.hparams['add_word_pos']: + x = x + self.word_pos_proj(dec_pos) + ret['attn'] = weight + else: + mel2ph = self.forward_dur(ph_encoder_out, mel2ph, ret) + mel2ph = clip_mel2token_to_multiple(mel2ph, self.hparams['frames_multiple']) + mel2word = mel2ph_to_mel2word(mel2ph, ph2word) + x = expand_states(ph_encoder_out, mel2ph) + if self.hparams['add_word_pos']: + dec_pos = self.get_pos_embed(word2word, mel2word) # [B, T_mel, H] + x = x + self.word_pos_proj(dec_pos) + tgt_nonpadding = (mel2ph > 0).float()[:, :, None] + if self.hparams['use_word_encoder']: + x = x + expand_states(word_encoder_out, mel2word) + return x, tgt_nonpadding + + def attention(self, ph_encoder_out, enc_pos, word_encoder_out, dec_pos, mel2word, dec_word_mask): + ph_kv = self.enc_pos_proj(torch.cat([ph_encoder_out, enc_pos], -1)) + word_enc_out_expend = expand_states(word_encoder_out, mel2word) + word_enc_out_expend = torch.cat([word_enc_out_expend, dec_pos], -1) + if self.hparams['text_encoder_postnet']: + word_enc_out_expend = self.dec_res_proj(word_enc_out_expend) + word_enc_out_expend = self.text_encoder_postnet(word_enc_out_expend) + dec_q = x_res = word_enc_out_expend + else: + dec_q = 
self.dec_query_proj(word_enc_out_expend) + x_res = self.dec_res_proj(word_enc_out_expend) + ph_kv, dec_q = ph_kv.transpose(0, 1), dec_q.transpose(0, 1) + x, (weight, _) = self.attn(dec_q, ph_kv, ph_kv, attn_mask=(1 - dec_word_mask) * -1e9) + x = x.transpose(0, 1) + x = x + x_res + return x, weight + + def run_decoder(self, x, tgt_nonpadding, ret, infer, tgt_mels=None, global_step=0): + if not self.hparams['use_fvae']: + x = self.decoder(x) + x = self.mel_out(x) + ret['kl'] = 0 + return x * tgt_nonpadding + else: + decoder_inp = x + x = x.transpose(1, 2) # [B, H, T] + tgt_nonpadding_BHT = tgt_nonpadding.transpose(1, 2) # [B, H, T] + if infer: + z = self.fvae(cond=x, infer=True) + else: + tgt_mels = tgt_mels.transpose(1, 2) # [B, 80, T] + z, ret['kl'], ret['z_p'], ret['m_q'], ret['logs_q'] = self.fvae( + tgt_mels, tgt_nonpadding_BHT, cond=x) + if global_step < self.hparams['posterior_start_steps']: + z = torch.randn_like(z) + x_recon = self.fvae.decoder(z, nonpadding=tgt_nonpadding_BHT, cond=x).transpose(1, 2) + ret['pre_mel_out'] = x_recon + return x_recon + + def forward_dur(self, dur_input, mel2word, ret, **kwargs): + """ + + :param dur_input: [B, T_txt, H] + :param mel2ph: [B, T_mel] + :param txt_tokens: [B, T_txt] + :param ret: + :return: + """ + src_padding = dur_input.data.abs().sum(-1) == 0 + dur_input = dur_input.detach() + self.hparams['predictor_grad'] * (dur_input - dur_input.detach()) + dur = self.dur_predictor(dur_input, src_padding) + if self.hparams['dur_level'] == 'word': + word_len = kwargs['word_len'] + ph2word = kwargs['ph2word'] + B, T_ph = ph2word.shape + dur = torch.zeros([B, word_len.max() + 1]).to(ph2word.device).scatter_add(1, ph2word, dur) + dur = dur[:, 1:] + ret['dur'] = dur + if mel2word is None: + mel2word = self.length_regulator(dur).detach() + return mel2word + + def get_pos_embed(self, word2word, x2word): + x_pos = build_word_mask(word2word, x2word).float() # [B, T_word, T_ph] + x_pos = (x_pos.cumsum(-1) / x_pos.sum(-1).clamp(min=1)[..., None] * x_pos).sum(1) + x_pos = self.sin_pos(x_pos.float()) # [B, T_ph, H] + return x_pos + + def store_inverse_all(self): + def remove_weight_norm(m): + try: + if hasattr(m, 'store_inverse'): + m.store_inverse() + nn.utils.remove_weight_norm(m) + except ValueError: # this module didn't have weight norm + return + + self.apply(remove_weight_norm) diff --git a/text_to_speech/modules/tts/portaspeech/portaspeech_flow.py b/text_to_speech/modules/tts/portaspeech/portaspeech_flow.py new file mode 100644 index 0000000000000000000000000000000000000000..19f4cf99d9c998b326963ec8f30935bbf1127caf --- /dev/null +++ b/text_to_speech/modules/tts/portaspeech/portaspeech_flow.py @@ -0,0 +1,75 @@ +import torch +import torch.distributions as dist +from torch import nn +from text_to_speech.modules.commons.normalizing_flow.glow_modules import Glow +from text_to_speech.modules.tts.portaspeech.portaspeech import PortaSpeech + + +class PortaSpeechFlow(PortaSpeech): + def __init__(self, ph_dict_size, word_dict_size, hparams, out_dims=None): + super().__init__(ph_dict_size, word_dict_size, hparams, out_dims) + cond_hs = 80 + if hparams.get('use_txt_cond', True): + cond_hs = cond_hs + hparams['hidden_size'] + if hparams.get('use_latent_cond', False): + cond_hs = cond_hs + hparams['latent_size'] + if hparams['use_cond_proj']: + self.g_proj = nn.Conv1d(cond_hs, 160, 5, padding=2) + cond_hs = 160 + self.post_flow = Glow( + 80, hparams['post_glow_hidden'], hparams['post_glow_kernel_size'], 1, + hparams['post_glow_n_blocks'], 
hparams['post_glow_n_block_layers'], + n_split=4, n_sqz=2, + gin_channels=cond_hs, + share_cond_layers=hparams['post_share_cond_layers'], + share_wn_layers=hparams['share_wn_layers'], + sigmoid_scale=hparams['sigmoid_scale'] + ) + self.prior_dist = dist.Normal(0, 1) + + def forward(self, txt_tokens, word_tokens, ph2word, word_len, mel2word=None, mel2ph=None, + spk_embed=None, spk_id=None, pitch=None, infer=False, tgt_mels=None, + forward_post_glow=True, two_stage=True, global_step=None, **kwargs): + is_training = self.training + train_fvae = not (forward_post_glow and two_stage) + if not train_fvae: + self.eval() + with torch.set_grad_enabled(mode=train_fvae): + ret = super(PortaSpeechFlow, self).forward( + txt_tokens, word_tokens, ph2word, word_len, mel2word, mel2ph, + spk_embed, spk_id, pitch, infer, tgt_mels, global_step, **kwargs) + if (forward_post_glow or not two_stage) and self.hparams['use_post_flow']: + self.run_post_glow(tgt_mels, infer, is_training, ret) + return ret + + def run_post_glow(self, tgt_mels, infer, is_training, ret): + x_recon = ret['mel_out'].transpose(1, 2) + g = x_recon + B, _, T = g.shape + if self.hparams.get('use_txt_cond', True): + g = torch.cat([g, ret['decoder_inp'].transpose(1, 2)], 1) + if self.hparams.get('use_latent_cond', False): + g_z = ret['z_p'][:, :, :, None].repeat(1, 1, 1, 4).reshape(B, -1, T) + g = torch.cat([g, g_z], 1) + if self.hparams['use_cond_proj']: + g = self.g_proj(g) + prior_dist = self.prior_dist + if not infer: + if is_training: + self.post_flow.train() + nonpadding = ret['nonpadding'].transpose(1, 2) + y_lengths = nonpadding.sum(-1) + if self.hparams['detach_postflow_input']: + g = g.detach() + tgt_mels = tgt_mels.transpose(1, 2) + z_postflow, ldj = self.post_flow(tgt_mels, nonpadding, g=g) + ldj = ldj / y_lengths / 80 + ret['z_pf'], ret['ldj_pf'] = z_postflow, ldj + ret['postflow'] = -prior_dist.log_prob(z_postflow).mean() - ldj.mean() + if torch.isnan(ret['postflow']): + ret['postflow'] = None + else: + nonpadding = torch.ones_like(x_recon[:, :1, :]) + z_post = torch.randn(x_recon.shape).to(g.device) * self.hparams['noise_scale'] + x_recon, _ = self.post_flow(z_post, nonpadding, g, reverse=True) + ret['mel_out'] = x_recon.transpose(1, 2) diff --git a/text_to_speech/modules/tts/syntaspeech/multi_window_disc.py b/text_to_speech/modules/tts/syntaspeech/multi_window_disc.py new file mode 100644 index 0000000000000000000000000000000000000000..a8166ac5b514e501043b9fed13aab01421a6c10e --- /dev/null +++ b/text_to_speech/modules/tts/syntaspeech/multi_window_disc.py @@ -0,0 +1,136 @@ +import numpy as np +import torch +import torch.nn as nn + + +class SingleWindowDisc(nn.Module): + def __init__(self, time_length, freq_length=80, kernel=(3, 3), c_in=1, hidden_size=128): + super().__init__() + padding = (kernel[0] // 2, kernel[1] // 2) + self.model = nn.ModuleList([ + nn.Sequential(*[ + nn.Conv2d(c_in, hidden_size, kernel, (2, 2), padding), + nn.LeakyReLU(0.2, inplace=True), + nn.Dropout2d(0.25), + nn.BatchNorm2d(hidden_size, 0.8) + ]), + nn.Sequential(*[ + nn.Conv2d(hidden_size, hidden_size, kernel, (2, 2), padding), + nn.LeakyReLU(0.2, inplace=True), + nn.Dropout2d(0.25), + nn.BatchNorm2d(hidden_size, 0.8) + ]), + nn.Sequential(*[ + nn.Conv2d(hidden_size, hidden_size, kernel, (2, 2), padding), + nn.LeakyReLU(0.2, inplace=True), + nn.Dropout2d(0.25), + ]), + ]) + ds_size = (time_length // 2 ** 3, (freq_length + 7) // 2 ** 3) + self.adv_layer = nn.Linear(hidden_size * ds_size[0] * ds_size[1], 1) + + def forward(self, x): + """ + :param x: [B, 
 C, T, n_bins] + :return: validity: [B, 1], h: List of hiddens + """ + h = [] + for l in self.model: + x = l(x) + h.append(x) + x = x.view(x.shape[0], -1) + validity = self.adv_layer(x) # [B, 1] + return validity, h + + +class MultiWindowDiscriminator(nn.Module): + def __init__(self, time_lengths, freq_length=80, kernel=(3, 3), c_in=1, hidden_size=128): + super(MultiWindowDiscriminator, self).__init__() + self.win_lengths = time_lengths + self.discriminators = nn.ModuleList() + + for time_length in time_lengths: + self.discriminators += [SingleWindowDisc(time_length, freq_length, kernel, c_in=c_in, hidden_size=hidden_size)] + + def forward(self, x, x_len, start_frames_wins=None): + ''' + Args: + x (tensor): input mel, (B, c_in, T, n_bins). + x_len (tensor): length of each mel, (B,). + + Returns: + validity (tensor): (B, 1); start_frames_wins (list); h (list of hidden features). + ''' + validity = [] + if start_frames_wins is None: + start_frames_wins = [None] * len(self.discriminators) + h = [] + for i, start_frames in zip(range(len(self.discriminators)), start_frames_wins): + x_clip, start_frames = self.clip(x, x_len, self.win_lengths[i], start_frames) # (B, win_length, C) + start_frames_wins[i] = start_frames + if x_clip is None: + continue + x_clip, h_ = self.discriminators[i](x_clip) + h += h_ + validity.append(x_clip) + if len(validity) != len(self.discriminators): + return None, start_frames_wins, h + validity = sum(validity) # [B] + return validity, start_frames_wins, h + + def clip(self, x, x_len, win_length, start_frames=None): + '''Randomly clip x to win_length. + Args: + x (tensor) : (B, c_in, T, n_bins). + start_frames (list, optional): fixed start frames to reuse, len B. + x_len (tensor) : (B,). + win_length (int): target clip length + + Returns: + x_batch (tensor): (B, c_in, win_length, n_bins); start_frames (list). + + ''' + T_start = 0 + T_end = x_len.max() - win_length + if T_end < 0: + return None, start_frames + T_end = T_end.item() + if start_frames is None: + start_frame = np.random.randint(low=T_start, high=T_end + 1) + start_frames = [start_frame] * x.size(0) + else: + start_frame = start_frames[0] + x_batch = x[:, :, start_frame: start_frame + win_length] + return x_batch, start_frames + + +class Discriminator(nn.Module): + def __init__(self, time_lengths=[32, 64, 128], freq_length=80, kernel=(3, 3), c_in=1, + hidden_size=128): + super(Discriminator, self).__init__() + self.time_lengths = time_lengths + self.discriminator = MultiWindowDiscriminator( + freq_length=freq_length, + time_lengths=time_lengths, + kernel=kernel, + c_in=c_in, hidden_size=hidden_size + ) + + + def forward(self, x, start_frames_wins=None): + """ + + :param x: [B, T, 80] + :param start_frames_wins: optional list of per-window start frames + :return: dict with 'y' (validity), 'h' (hidden features) and 'start_frames_wins' + """ + if len(x.shape) == 3: + x = x[:, None, :, :] # [B,1,T,80] + x_len = x.sum([1, -1]).ne(0).int().sum([-1]) + ret = {'y_c': None, 'y': None} + ret['y'], start_frames_wins, ret['h'] = self.discriminator( + x, x_len, start_frames_wins=start_frames_wins) + + ret['start_frames_wins'] = start_frames_wins + return ret + diff --git a/text_to_speech/modules/tts/syntaspeech/syntactic_graph_buider.py b/text_to_speech/modules/tts/syntaspeech/syntactic_graph_buider.py new file mode 100644 index 0000000000000000000000000000000000000000..6334b6a0435390abb05f04638afc6e2a57a8f000 --- /dev/null +++ b/text_to_speech/modules/tts/syntaspeech/syntactic_graph_buider.py @@ -0,0 +1,294 @@ +from copy import deepcopy +import torch +import dgl +import stanza +import networkx as nx + +class Sentence2GraphParser: + def __init__(self, language='zh', use_gpu=False, download=False): + self.language = language + if download: + self.stanza_parser = 
stanza.Pipeline(lang=language, use_gpu=use_gpu) + else: + self.stanza_parser = stanza.Pipeline(lang=language, use_gpu=use_gpu, download_method=None) + + def parse(self, clean_sentence=None, words=None, ph_words=None): + if self.language == 'zh': + assert words is not None and ph_words is not None + ret = self._parse_zh(words, ph_words) + elif self.language == 'en': + assert clean_sentence is not None + ret = self._parse_en(clean_sentence) + else: + raise NotImplementedError + return ret + + def _parse_zh(self, words, ph_words, enable_backward_edge=True, enable_recur_edge=True, + enable_inter_sentence_edge=True, sequential_edge=False): + """ + words: , each character in chinese is one item + ph_words: , each character in chinese is one item, represented by the phoneme + Example: + text1 = '宝马配挂跛骡鞍,貂蝉怨枕董翁榻.' + words = ['', '宝', '马', '配', '挂', '跛', '骡', '鞍', ',' + , '貂', '蝉', '怨', '枕', '董', '翁', '榻', ''] + ph_words = ['', 'b_ao3_|', 'm_a3_#', 'p_ei4_|', 'g_ua4_#', + 'b_o3_#', 'l_uo2_|', 'an1', ',', 'd_iao1_|', + 'ch_an2_#', 'van4_#', 'zh_en3_#', 'd_ong3_|', 'ueng1_#', 't_a4', ''] + """ + words, ph_words = words[1:-1], ph_words[1:-1] # delete and + for i, p_w in enumerate(ph_words): + if p_w == ',': + # change english ',' into chinese + # we found it necessary in stanza's dependency parsing + words[i], ph_words[i] = ',', ',' + tmp_words = deepcopy(words) + num_added_space = 0 + for i, p_w in enumerate(ph_words): + if p_w.endswith("#"): + # add a blank after the p_w with '#', to separate words + tmp_words.insert(num_added_space + i + 1, " ") + num_added_space += 1 + if p_w in [',', ',']: + # add one blank before and after ', ', respectively + tmp_words.insert(num_added_space + i + 1, " ") # insert behind ',' first + tmp_words.insert(num_added_space + i, " ") # insert before + num_added_space += 2 + clean_text = ''.join(tmp_words).strip() + parser_out = self.stanza_parser(clean_text) + + idx_to_word = {i + 1: w for i, w in enumerate(words)} + + vocab_nodes = {} + vocab_idx_offset = 0 + for sentence in parser_out.sentences: + num_nodes_in_current_sentence = 0 + for vocab_node in sentence.words: + num_nodes_in_current_sentence += 1 + vocab_idx = vocab_node.id + vocab_idx_offset + vocab_text = vocab_node.text.replace(" ", "") # delete blank in vocab + vocab_nodes[vocab_idx] = vocab_text + vocab_idx_offset += num_nodes_in_current_sentence + + # start vocab-to-word alignment + vocab_to_word = {} + current_word_idx = 1 + for vocab_i in vocab_nodes.keys(): + vocab_to_word[vocab_i] = [] + for w_in_vocab_i in vocab_nodes[vocab_i]: + if w_in_vocab_i != idx_to_word[current_word_idx]: + raise ValueError("Word Mismatch!") + vocab_to_word[vocab_i].append(current_word_idx) # add a path (vocab_node_idx, word_global_idx) + current_word_idx += 1 + + # then we compute the vocab-level edges + if len(parser_out.sentences) > 5: + print("Detect more than 5 input sentence! 
pls check whether the sentence is too long!") + vocab_level_source_id, vocab_level_dest_id = [], [] + vocab_level_edge_types = [] + sentences_heads = [] + vocab_id_offset = 0 + # get forward edges + for s in parser_out.sentences: + for w in s.words: + w_idx = w.id + vocab_id_offset # it starts from 1, just same as binarizer + w_dest_idx = w.head + vocab_id_offset + if w.head == 0: + sentences_heads.append(w_idx) + continue + vocab_level_source_id.append(w_idx) + vocab_level_dest_id.append(w_dest_idx) + vocab_id_offset += len(s.words) + vocab_level_edge_types += [0] * len(vocab_level_source_id) + num_vocab = vocab_id_offset + + # optional: get backward edges + if enable_backward_edge: + back_source, back_dest = deepcopy(vocab_level_dest_id), deepcopy(vocab_level_source_id) + vocab_level_source_id += back_source + vocab_level_dest_id += back_dest + vocab_level_edge_types += [1] * len(back_source) + + # optional: get inter-sentence edges if num_sentences > 1 + inter_sentence_source, inter_sentence_dest = [], [] + if enable_inter_sentence_edge and len(sentences_heads) > 1: + def get_full_graph_edges(nodes): + tmp_edges = [] + for i, node_i in enumerate(nodes): + for j, node_j in enumerate(nodes): + if i == j: + continue + tmp_edges.append((node_i, node_j)) + return tmp_edges + + tmp_edges = get_full_graph_edges(sentences_heads) + for (source, dest) in tmp_edges: + inter_sentence_source.append(source) + inter_sentence_dest.append(dest) + vocab_level_source_id += inter_sentence_source + vocab_level_dest_id += inter_sentence_dest + vocab_level_edge_types += [3] * len(inter_sentence_source) + + if sequential_edge: + seq_source, seq_dest = list(range(1, num_vocab)) + list(range(num_vocab, 0, -1)), \ + list(range(2, num_vocab + 1)) + list(range(num_vocab - 1, -1, -1)) + vocab_level_source_id += seq_source + vocab_level_dest_id += seq_dest + vocab_level_edge_types += [4] * (num_vocab - 1) + [5] * (num_vocab - 1) + + # Then, we use the vocab-level edges and the vocab-to-word path, to construct the word-level graph + num_word = len(words) + source_id, dest_id, edge_types = [], [], [] + for (vocab_start, vocab_end, vocab_edge_type) in zip(vocab_level_source_id, vocab_level_dest_id, + vocab_level_edge_types): + # connect the first word in the vocab + word_start = min(vocab_to_word[vocab_start]) + word_end = min(vocab_to_word[vocab_end]) + source_id.append(word_start) + dest_id.append(word_end) + edge_types.append(vocab_edge_type) + + # sequential connection in words + for word_indices_in_v in vocab_to_word.values(): + for i, word_idx in enumerate(word_indices_in_v): + if i + 1 < len(word_indices_in_v): + source_id.append(word_idx) + dest_id.append(word_idx + 1) + edge_types.append(4) + if i - 1 >= 0: + source_id.append(word_idx) + dest_id.append(word_idx - 1) + edge_types.append(5) + + # optional: get recurrent edges + if enable_recur_edge: + recur_source, recur_dest = list(range(1, num_word + 1)), list(range(1, num_word + 1)) + source_id += recur_source + dest_id += recur_dest + edge_types += [2] * len(recur_source) + + # add and + source_id += [0, num_word + 1, 1, num_word] + dest_id += [1, num_word, 0, num_word + 1] + edge_types += [4, 4, 5, 5] # 4 represents sequentially forward, 5 is sequential backward + + edges = (torch.LongTensor(source_id), torch.LongTensor(dest_id)) + dgl_graph = dgl.graph(edges) + assert dgl_graph.num_edges() == len(edge_types) + return dgl_graph, torch.LongTensor(edge_types) + + def _parse_en(self, clean_sentence, enable_backward_edge=True, enable_recur_edge=True, + 
enable_inter_sentence_edge=True, sequential_edge=False, consider_bos_for_index=True): + """ + clean_sentence: , each word or punctuation should be separated by one blank. + """ + edge_types = [] # required for gated graph neural network + clean_sentence = clean_sentence.strip() + if clean_sentence.endswith((" .", " ,", " ;", " :", " ?", " !")): + clean_sentence = clean_sentence[:-2] + if clean_sentence.startswith(". "): + clean_sentence = clean_sentence[2:] + parser_out = self.stanza_parser(clean_sentence) + if len(parser_out.sentences) > 5: + print("Detect more than 5 input sentence! pls check whether the sentence is too long!") + print(clean_sentence) + source_id, dest_id = [], [] + sentences_heads = [] + word_id_offset = 0 + # get forward edges + for s in parser_out.sentences: + for w in s.words: + w_idx = w.id + word_id_offset # it starts from 1, just same as binarizer + w_dest_idx = w.head + word_id_offset + if w.head == 0: + sentences_heads.append(w_idx) + continue + source_id.append(w_idx) + dest_id.append(w_dest_idx) + word_id_offset += len(s.words) + num_word = word_id_offset + edge_types += [0] * len(source_id) + + # optional: get backward edges + if enable_backward_edge: + back_source, back_dest = deepcopy(dest_id), deepcopy(source_id) + source_id += back_source + dest_id += back_dest + edge_types += [1] * len(back_source) + + # optional: get recurrent edges + if enable_recur_edge: + recur_source, recur_dest = list(range(1, num_word + 1)), list(range(1, num_word + 1)) + source_id += recur_source + dest_id += recur_dest + edge_types += [2] * len(recur_source) + + # optional: get inter-sentence edges if num_sentences > 1 + inter_sentence_source, inter_sentence_dest = [], [] + if enable_inter_sentence_edge and len(sentences_heads) > 1: + def get_full_graph_edges(nodes): + tmp_edges = [] + for i, node_i in enumerate(nodes): + for j, node_j in enumerate(nodes): + if i == j: + continue + tmp_edges.append((node_i, node_j)) + return tmp_edges + + tmp_edges = get_full_graph_edges(sentences_heads) + for (source, dest) in tmp_edges: + inter_sentence_source.append(source) + inter_sentence_dest.append(dest) + source_id += inter_sentence_source + dest_id += inter_sentence_dest + edge_types += [3] * len(inter_sentence_source) + + # add and + source_id += [0, num_word + 1, 1, num_word] + dest_id += [1, num_word, 0, num_word + 1] + edge_types += [4, 4, 5, 5] # 4 represents sequentially forward, 5 is sequential backward + + # optional: sequential edge + if sequential_edge: + seq_source, seq_dest = list(range(1, num_word)) + list(range(num_word, 0, -1)), \ + list(range(2, num_word + 1)) + list(range(num_word - 1, -1, -1)) + source_id += seq_source + dest_id += seq_dest + edge_types += [4] * (num_word - 1) + [5] * (num_word - 1) + if consider_bos_for_index: + edges = (torch.LongTensor(source_id), torch.LongTensor(dest_id)) + else: + edges = (torch.LongTensor(source_id) - 1, torch.LongTensor(dest_id) - 1) + dgl_graph = dgl.graph(edges) + assert dgl_graph.num_edges() == len(edge_types) + return dgl_graph, torch.LongTensor(edge_types) + + +def plot_dgl_sentence_graph(dgl_graph, labels): + """ + labels = {idx: word for idx,word in enumerate(sentence.split(" ")) } + """ + import matplotlib.pyplot as plt + nx_graph = dgl_graph.to_networkx() + pos = nx.random_layout(nx_graph) + nx.draw(nx_graph, pos, with_labels=False) + nx.draw_networkx_labels(nx_graph, pos, labels) + plt.show() + +if __name__ == '__main__': + + # Unit Test for Chinese Graph Builder + parser = Sentence2GraphParser("zh") + text1 = 
'宝马配挂跛骡鞍,貂蝉怨枕董翁榻.' + words = ['', '宝', '马', '配', '挂', '跛', '骡', '鞍', ',', '貂', '蝉', '怨', '枕', '董', '翁', '榻', ''] + ph_words = ['', 'b_ao3_|', 'm_a3_#', 'p_ei4_|', 'g_ua4_#', 'b_o3_#', 'l_uo2_|', 'an1', ',', 'd_iao1_|', + 'ch_an2_#', 'van4_#', 'zh_en3_#', 'd_ong3_|', 'ueng1_#', 't_a4', ''] + graph1, etypes1 = parser.parse(text1, words, ph_words) + plot_dgl_sentence_graph(graph1, {i: w for i, w in enumerate(ph_words)}) + + # Unit Test for English Graph Builder + parser = Sentence2GraphParser("en") + text2 = "I love you . You love me . Mixue ice-scream and tea ." + graph2, etypes2 = parser.parse(text2) + plot_dgl_sentence_graph(graph2, {i: w for i, w in enumerate((" " + text2 + " ").split(" "))}) + diff --git a/text_to_speech/modules/tts/syntaspeech/syntactic_graph_encoder.py b/text_to_speech/modules/tts/syntaspeech/syntactic_graph_encoder.py new file mode 100644 index 0000000000000000000000000000000000000000..e345f0f36113977d601b0b3100e54c59897a712a --- /dev/null +++ b/text_to_speech/modules/tts/syntaspeech/syntactic_graph_encoder.py @@ -0,0 +1,193 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +import dgl +from dgl.nn.pytorch import GatedGraphConv + +def sequence_mask(lengths, maxlen, dtype=torch.bool): + if maxlen is None: + maxlen = lengths.max() + mask = ~(torch.ones((len(lengths), maxlen)).to(lengths.device).cumsum(dim=1).t() > lengths).t() + mask.type(dtype) + return mask + + +def group_hidden_by_segs(h, seg_ids, max_len): + """ + :param h: [B, T, H] + :param seg_ids: [B, T] + :return: h_ph: [B, T_ph, H] + """ + B, T, H = h.shape + h_gby_segs = h.new_zeros([B, max_len + 1, H]).scatter_add_(1, seg_ids[:, :, None].repeat([1, 1, H]), h) + all_ones = h.new_ones(h.shape[:2]) + cnt_gby_segs = h.new_zeros([B, max_len + 1]).scatter_add_(1, seg_ids, all_ones).contiguous() + h_gby_segs = h_gby_segs[:, 1:] + cnt_gby_segs = cnt_gby_segs[:, 1:] + h_gby_segs = h_gby_segs / torch.clamp(cnt_gby_segs[:, :, None], min=1) + # assert h_gby_segs.shape[-1] == 192 + return h_gby_segs + +class GraphAuxEnc(nn.Module): + def __init__(self, in_dim, hid_dim, out_dim, n_iterations=5, n_edge_types=6): + super(GraphAuxEnc, self).__init__() + self.in_dim = in_dim + self.hid_dim = hid_dim + self.out_dim = out_dim + self.skip_connect = True + self.dropout_after_gae = False + + self.ggc_1 = GatedGraphConv(in_feats=in_dim, out_feats=hid_dim + , n_steps=n_iterations, n_etypes=n_edge_types) + self.ggc_2 = GatedGraphConv(in_feats=hid_dim, out_feats=out_dim + , n_steps=n_iterations, n_etypes=n_edge_types) + self.dropout = nn.Dropout(p=0.5) + + @staticmethod + def ph_encoding_to_word_encoding(ph_encoding, ph2word, word_len): + """ + ph_encoding: [batch, t_p, hid] + ph2word: tensor [batch, t_w] + word_len: tensor [batch] + """ + word_encoding_for_graph, batch_word_encoding, has_word_row_idx = GraphAuxEnc._process_ph_to_word_encoding( + ph_encoding, + ph2word, + word_len) + # [batch, t_w, hid] + return batch_word_encoding, word_encoding_for_graph + + def pad_word_encoding_to_phoneme(self, word_encoding, ph2word, t_p): + return self._postprocess_word2ph(word_encoding, ph2word, t_p) + + @staticmethod + def _process_ph_to_word_encoding(ph_encoding, ph2word, word_len=None): + """ + ph_encoding: [batch, t_p, hid] + ph2word: tensor [batch, t_w] + word_len: tensor [batch] + """ + word_len = word_len.reshape([-1,]) + max_len = max(word_len) + num_nodes = sum(word_len) + + batch_word_encoding = group_hidden_by_segs(ph_encoding, ph2word, max_len) + bs, t_p, hid = batch_word_encoding.shape + has_word_mask = 
sequence_mask(word_len, max_len) # [batch, t_p, 1] + word_encoding = batch_word_encoding.reshape([bs * t_p, hid]) + has_word_row_idx = has_word_mask.reshape([-1]) + word_encoding = word_encoding[has_word_row_idx] + assert word_encoding.shape[0] == num_nodes + return word_encoding, batch_word_encoding, has_word_row_idx + + @staticmethod + def _postprocess_word2ph(word_encoding, ph2word, t_p): + word_encoding = F.pad(word_encoding,[0,0,1,0]) + ph2word_ = ph2word[:, :, None].repeat([1, 1, word_encoding.shape[-1]]) + out = torch.gather(word_encoding, 1, ph2word_) # [B, T, H] + return out + + @staticmethod + def _repeat_one_sequence(x, d, T): + """Repeat each frame according to duration.""" + if d.sum() == 0: + d = d.fill_(1) + hid = x.shape[-1] + expanded_lst = [x_.repeat(int(d_), 1) for x_, d_ in zip(x, d) if d_ != 0] + expanded = torch.cat(expanded_lst, dim=0) + if T > expanded.shape[0]: + expanded = torch.cat([expanded, torch.zeros([T - expanded.shape[0], hid]).to(expanded.device)], dim=0) + return expanded + + def word_forward(self, graph_lst, word_encoding, etypes_lst): + """ + word encoding in, word encoding out. + """ + batched_graph = dgl.batch(graph_lst) + inp = word_encoding + batched_etypes = torch.cat(etypes_lst) # [num_edges_in_batch, 1] + assert batched_graph.num_nodes() == inp.shape[0] + + gcc1_out = self.ggc_1(batched_graph, inp, batched_etypes) + if self.dropout_after_gae: + gcc1_out = self.dropout(gcc1_out) + gcc2_out = self.ggc_2(batched_graph, gcc1_out, batched_etypes) # [num_nodes_in_batch, hin] + if self.dropout_after_gae: + gcc2_out = self.ggc_2(batched_graph, gcc2_out, batched_etypes) + if self.skip_connect: + assert self.in_dim == self.hid_dim and self.hid_dim == self.out_dim + gcc2_out = inp + gcc1_out + gcc2_out + + word_len = torch.tensor([g.num_nodes() for g in graph_lst]).reshape([-1]) + max_len = max(word_len) + has_word_mask = sequence_mask(word_len, max_len) # [batch, t_p, 1] + has_word_row_idx = has_word_mask.reshape([-1]) + bs = len(graph_lst) + t_w = max([g.num_nodes() for g in graph_lst]) + hid = word_encoding.shape[-1] + output = torch.zeros([bs * t_w, hid]).to(gcc2_out.device) + output[has_word_row_idx] = gcc2_out + output = output.reshape([bs, t_w, hid]) + word_level_output = output + return torch.transpose(word_level_output, 1, 2) + + def forward(self, graph_lst, ph_encoding, ph2word, etypes_lst, return_word_encoding=False): + """ + graph_lst: [list of dgl_graph] + ph_encoding: [batch, hid, t_p] + ph2word: [list of list[1,2,2,2,3,3,3]] + etypes_lst: [list of etypes]; etypes: torch.LongTensor + """ + t_p = ph_encoding.shape[-1] + ph_encoding = ph_encoding.transpose(1,2) # [batch, t_p, hid] + word_len = torch.tensor([g.num_nodes() for g in graph_lst]).reshape([-1]) + batched_graph = dgl.batch(graph_lst) + inp, batched_word_encoding, has_word_row_idx = self._process_ph_to_word_encoding(ph_encoding, ph2word, + word_len=word_len) # [num_nodes_in_batch, in_dim] + bs, t_w, hid = batched_word_encoding.shape + batched_etypes = torch.cat(etypes_lst) # [num_edges_in_batch, 1] + gcc1_out = self.ggc_1(batched_graph, inp, batched_etypes) + gcc2_out = self.ggc_2(batched_graph, gcc1_out, batched_etypes) # [num_nodes_in_batch, hin] + # skip connection + gcc2_out = inp + gcc1_out + gcc2_out # [n_nodes, hid] + + output = torch.zeros([bs * t_w, hid]).to(gcc2_out.device) + output[has_word_row_idx] = gcc2_out + output = output.reshape([bs, t_w, hid]) + word_level_output = output + output = self._postprocess_word2ph(word_level_output, ph2word, t_p) # [batch, t_p, hid] + 
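# the graph features are now back at phoneme resolution ([batch, t_p, hid]); transpose to + # [batch, hid, t_p] so the output matches the channel-first layout of the ph_encoding input. +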
output = torch.transpose(output, 1, 2) + + if return_word_encoding: + return output, torch.transpose(word_level_output, 1, 2) + else: + return output + +if __name__ == '__main__': + # Unit Test for batching graphs + from text_to_speech.modules.tts.syntaspeech.syntactic_graph_buider import Sentence2GraphParser, plot_dgl_sentence_graph + parser = Sentence2GraphParser("en") + + # Unit Test for English Graph Builder + text1 = "To be or not to be , that 's a question ." + text2 = "I love you . You love me . Mixue ice-scream and tea ." + graph1, etypes1 = parser.parse(text1) + graph2, etypes2 = parser.parse(text2) + batched_text = " " + text1 + " " + " " + " " + text2 + " " + batched_nodes = [graph1.num_nodes(), graph2.num_nodes()] + plot_dgl_sentence_graph(dgl.batch([graph1, graph2]), {i: w for i, w in enumerate(batched_text.split(" "))}) + etypes_lst = [etypes1, etypes2] + + # Unit Test for Graph Encoder forward + in_feats = 4 + out_feats = 4 + enc = GraphAuxEnc(in_dim=in_feats, hid_dim=in_feats, out_dim=out_feats) + ph2word = torch.tensor([ + [1, 2, 3, 3, 3, 4, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 0], + [1, 2, 3, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] + ]) + inp = torch.randn([2, in_feats, 17]) # [N_sentence, feat, ph_length] + graph_lst = [graph1, graph2] + out = enc(graph_lst, inp, ph2word, etypes_lst) + print(out.shape) # [N_sentence, feat, ph_length] diff --git a/text_to_speech/modules/tts/syntaspeech/syntaspeech.py b/text_to_speech/modules/tts/syntaspeech/syntaspeech.py new file mode 100644 index 0000000000000000000000000000000000000000..a530acad5e5a0bcc547d6a866156cf2c357eeda6 --- /dev/null +++ b/text_to_speech/modules/tts/syntaspeech/syntaspeech.py @@ -0,0 +1,277 @@ +import math +import torch +import torch.nn.functional as F +from torch import nn +from torch.nn import Linear + +from text_to_speech.modules.commons.conv import ConvBlocks, ConditionalConvBlocks +from text_to_speech.modules.commons.layers import Embedding +from text_to_speech.modules.commons.rel_transformer import RelTransformerEncoder +from text_to_speech.modules.commons.transformer import MultiheadAttention, FFTBlocks +from text_to_speech.modules.tts.commons.align_ops import clip_mel2token_to_multiple, build_word_mask, expand_states, mel2ph_to_mel2word +from text_to_speech.modules.tts.fs import FS_DECODERS, FastSpeech +from text_to_speech.modules.tts.portaspeech.fvae import SyntaFVAE, FVAE +from text_to_speech.utils.commons.meters import Timer +from text_to_speech.utils.nn.seq_utils import group_hidden_by_segs +from text_to_speech.modules.commons.nar_tts_modules import SyntaDurationPredictor + + +class SinusoidalPosEmb(nn.Module): + def __init__(self, dim): + super().__init__() + self.dim = dim + + def forward(self, x): + """ + + :param x: [B, T] + :return: [B, T, H] + """ + device = x.device + half_dim = self.dim // 2 + emb = math.log(10000) / (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, device=device) * -emb) + emb = x[:, :, None] * emb[None, :] + emb = torch.cat((emb.sin(), emb.cos()), dim=-1) + return emb + + +class SyntaSpeech(FastSpeech): + def __init__(self, ph_dict_size, word_dict_size, hparams, out_dims=None): + super().__init__(ph_dict_size, hparams, out_dims) + # build linguistic encoder + if hparams['num_spk'] > 1: + self.spk_embed_proj = Embedding(hparams['num_spk'], self.hidden_size) + if hparams['use_word_encoder']: + self.word_encoder = RelTransformerEncoder( + word_dict_size, self.hidden_size, self.hidden_size, self.hidden_size, 2, + hparams['word_enc_layers'], 
hparams['enc_ffn_kernel_size']) + if hparams['dur_level'] == 'word': + if hparams['word_encoder_type'] == 'rel_fft': + self.ph2word_encoder = RelTransformerEncoder( + 0, self.hidden_size, self.hidden_size, self.hidden_size, 2, + hparams['word_enc_layers'], hparams['enc_ffn_kernel_size']) + if hparams['word_encoder_type'] == 'fft': + self.ph2word_encoder = FFTBlocks( + self.hidden_size, hparams['word_enc_layers'], 1, num_heads=hparams['num_heads']) + self.sin_pos = SinusoidalPosEmb(self.hidden_size) + self.enc_pos_proj = nn.Linear(2 * self.hidden_size, self.hidden_size) + self.dec_query_proj = nn.Linear(2 * self.hidden_size, self.hidden_size) + self.dec_res_proj = nn.Linear(2 * self.hidden_size, self.hidden_size) + self.attn = MultiheadAttention(self.hidden_size, 1, encoder_decoder_attention=True, bias=False) + self.attn.enable_torch_version = False + if hparams['text_encoder_postnet']: + self.text_encoder_postnet = ConvBlocks( + self.hidden_size, self.hidden_size, [1] * 3, 5, layers_in_block=2) + else: + self.sin_pos = SinusoidalPosEmb(self.hidden_size) + + predictor_hidden = hparams['predictor_hidden'] if hparams['predictor_hidden'] > 0 else self.hidden_size + self.dur_predictor = SyntaDurationPredictor( + self.hidden_size, + n_chans=predictor_hidden, + n_layers=hparams['dur_predictor_layers'], + dropout_rate=hparams['predictor_dropout'], + kernel_size=hparams['dur_predictor_kernel']) + # build VAE decoder + if hparams['use_fvae']: + del self.decoder + del self.mel_out + if hparams.get("use_gae_in_prior", True): + self.fvae = SyntaFVAE( + c_in_out=self.out_dims, + hidden_size=hparams['fvae_enc_dec_hidden'], c_latent=hparams['latent_size'], + kernel_size=hparams['fvae_kernel_size'], + enc_n_layers=hparams['fvae_enc_n_layers'], + dec_n_layers=hparams['fvae_dec_n_layers'], + c_cond=self.hidden_size, + use_prior_flow=hparams['use_prior_flow'], + flow_hidden=hparams['prior_flow_hidden'], + flow_kernel_size=hparams['prior_flow_kernel_size'], + flow_n_steps=hparams['prior_flow_n_blocks'], + strides=[hparams['fvae_strides']], + encoder_type=hparams['fvae_encoder_type'], + decoder_type=hparams['fvae_decoder_type'], + ) + else: + self.fvae = FVAE( + c_in_out=self.out_dims, + hidden_size=hparams['fvae_enc_dec_hidden'], c_latent=hparams['latent_size'], + kernel_size=hparams['fvae_kernel_size'], + enc_n_layers=hparams['fvae_enc_n_layers'], + dec_n_layers=hparams['fvae_dec_n_layers'], + c_cond=self.hidden_size, + use_prior_flow=hparams['use_prior_flow'], + flow_hidden=hparams['prior_flow_hidden'], + flow_kernel_size=hparams['prior_flow_kernel_size'], + flow_n_steps=hparams['prior_flow_n_blocks'], + strides=[hparams['fvae_strides']], + encoder_type=hparams['fvae_encoder_type'], + decoder_type=hparams['fvae_decoder_type'], + ) + else: + self.decoder = FS_DECODERS[hparams['decoder_type']](hparams) + self.mel_out = Linear(self.hidden_size, self.out_dims, bias=True) + if hparams['use_pitch_embed']: + self.pitch_embed = Embedding(300, self.hidden_size, 0) + if self.hparams['add_word_pos']: + self.word_pos_proj = Linear(self.hidden_size, self.hidden_size) + + def build_embedding(self, dictionary, embed_dim): + num_embeddings = len(dictionary) + emb = Embedding(num_embeddings, embed_dim, self.padding_idx) + return emb + + def forward(self, txt_tokens, word_tokens, ph2word, word_len, mel2word=None, mel2ph=None, + spk_embed=None, spk_id=None, pitch=None, infer=False, tgt_mels=None, + global_step=None, graph_lst=None, etypes_lst=None, *args, **kwargs): + + if self.hparams['use_spk_embed']: + spk_embed = spk_embed 
+ elif self.hparams['use_spk_id']: + spk_embed = self.spk_embed_proj(spk_id)[:, None, :] + else: + spk_embed = 0 + + ret = {} + style_embed = self.forward_style_embed(spk_embed, spk_id) # speaker embedding, [B, 1, C] + x, tgt_nonpadding = self.run_text_encoder( + txt_tokens, word_tokens, ph2word, word_len, mel2word, mel2ph, style_embed, ret, graph_lst=graph_lst, etypes_lst=etypes_lst, **kwargs) + x = x + style_embed # it maybe necessary to achieve multi-speaker + x = x * tgt_nonpadding + ret['nonpadding'] = tgt_nonpadding + if self.hparams['use_pitch_embed']: + x = x + self.pitch_embed(pitch) + ret['decoder_inp'] = x + if infer and (mel2ph is None or mel2word is None): + mel2word = ret['mel2word'] + ret['mel_out_fvae'] = ret['mel_out'] = self.run_decoder(x, tgt_nonpadding, ret, infer, tgt_mels, global_step, + mel2word=mel2word, ph2word=ph2word, graph_lst=graph_lst, etypes_lst=etypes_lst) + return ret + + def run_text_encoder(self, txt_tokens, word_tokens, ph2word, word_len, mel2word, mel2ph, style_embed, ret, graph_lst, etypes_lst, **kwargs): + word2word = torch.arange(word_len)[None, :].to(ph2word.device) + 1 # [B, T_mel, T_word] + src_nonpadding = (txt_tokens > 0).float()[:, :, None] + use_bert = self.hparams.get("use_bert") is True + if use_bert: + ph_encoder_out = self.encoder(txt_tokens, bert_feats=kwargs['bert_feats'], ph2word=ph2word, + graph_lst=graph_lst, etypes_lst=etypes_lst, + cl_feats=kwargs['cl_feats'], ret=ret) * src_nonpadding + style_embed + else: + ph_encoder_out = self.encoder(txt_tokens) * src_nonpadding + style_embed + if self.hparams['use_word_encoder']: + word_encoder_out = self.word_encoder(word_tokens) + style_embed + ph_encoder_out = ph_encoder_out + expand_states(word_encoder_out, ph2word) + + dur_input = ph_encoder_out * src_nonpadding + if self.hparams['dur_level'] == 'word': + word_encoder_out = 0 + h_ph_gb_word = group_hidden_by_segs(ph_encoder_out, ph2word, word_len)[0] + word_encoder_out = word_encoder_out + self.ph2word_encoder(h_ph_gb_word) + if self.hparams['use_word_encoder']: + word_encoder_out = word_encoder_out + self.word_encoder(word_tokens) + mel2word = self.forward_dur(dur_input, mel2word, ret, ph2word=ph2word, word_len=word_len, graph_lst=graph_lst, etypes_lst=etypes_lst) + mel2word = clip_mel2token_to_multiple(mel2word, self.hparams['frames_multiple']) + ret['mel2word'] = mel2word + tgt_nonpadding = (mel2word > 0).float()[:, :, None] + enc_pos = self.get_pos_embed(word2word, ph2word) # [B, T_ph, H] + dec_pos = self.get_pos_embed(word2word, mel2word) # [B, T_mel, H] + dec_word_mask = build_word_mask(mel2word, ph2word) # [B, T_mel, T_ph] + x, weight = self.attention(ph_encoder_out, enc_pos, word_encoder_out, dec_pos, mel2word, dec_word_mask) + if self.hparams['add_word_pos']: + x = x + self.word_pos_proj(dec_pos) + ret['attn'] = weight + else: + mel2ph = self.forward_dur(dur_input, mel2ph, ret) + mel2ph = clip_mel2token_to_multiple(mel2ph, self.hparams['frames_multiple']) + mel2word = mel2ph_to_mel2word(mel2ph, ph2word) + x = expand_states(ph_encoder_out, mel2ph) + if self.hparams['add_word_pos']: + dec_pos = self.get_pos_embed(word2word, mel2word) # [B, T_mel, H] + x = x + self.word_pos_proj(dec_pos) + tgt_nonpadding = (mel2ph > 0).float()[:, :, None] + if self.hparams['use_word_encoder']: + x = x + expand_states(word_encoder_out, mel2word) + return x, tgt_nonpadding + + def attention(self, ph_encoder_out, enc_pos, word_encoder_out, dec_pos, mel2word, dec_word_mask): + ph_kv = self.enc_pos_proj(torch.cat([ph_encoder_out, enc_pos], -1)) + 
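In run_text_encoder above, attention between mel frames and phonemes is restricted to the phonemes of the frame's own word: dec_word_mask comes from build_word_mask(mel2word, ph2word), and (1 - dec_word_mask) * -1e9 is added to the attention logits so cross-word scores vanish after softmax. A toy sketch of that mask (assuming build_word_mask is the broadcast word-index comparison its usage here implies):

import torch

mel2word = torch.tensor([[1, 1, 1, 2, 2, 3]])   # word id per mel frame
ph2word = torch.tensor([[1, 2, 2, 3]])          # word id per phoneme
dec_word_mask = (mel2word[:, :, None] == ph2word[:, None, :]).float()  # [B, T_mel, T_ph]
attn_bias = (1 - dec_word_mask) * -1e9          # large negative where frame and phoneme belong to different words
print(dec_word_mask[0])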
word_enc_out_expend = expand_states(word_encoder_out, mel2word) + word_enc_out_expend = torch.cat([word_enc_out_expend, dec_pos], -1) + if self.hparams['text_encoder_postnet']: + word_enc_out_expend = self.dec_res_proj(word_enc_out_expend) + word_enc_out_expend = self.text_encoder_postnet(word_enc_out_expend) + dec_q = x_res = word_enc_out_expend + else: + dec_q = self.dec_query_proj(word_enc_out_expend) + x_res = self.dec_res_proj(word_enc_out_expend) + ph_kv, dec_q = ph_kv.transpose(0, 1), dec_q.transpose(0, 1) + x, (weight, _) = self.attn(dec_q, ph_kv, ph_kv, attn_mask=(1 - dec_word_mask) * -1e9) + x = x.transpose(0, 1) + x = x + x_res + return x, weight + + def run_decoder(self, x, tgt_nonpadding, ret, infer, tgt_mels=None, global_step=0, + mel2word=None, ph2word=None, graph_lst=None, etypes_lst=None): + if not self.hparams['use_fvae']: + x = self.decoder(x) + x = self.mel_out(x) + ret['kl'] = 0 + return x * tgt_nonpadding + else: + # x is the phoneme encoding + x = x.transpose(1, 2) # [B, H, T] + tgt_nonpadding_BHT = tgt_nonpadding.transpose(1, 2) # [B, H, T] + if infer: + z = self.fvae(cond=x, infer=True, mel2word=mel2word, ph2word=ph2word, graph_lst=graph_lst, etypes_lst=etypes_lst) + else: + tgt_mels = tgt_mels.transpose(1, 2) # [B, 80, T] + z, ret['kl'], ret['z_p'], ret['m_q'], ret['logs_q'] = self.fvae( + tgt_mels, tgt_nonpadding_BHT, cond=x, mel2word=mel2word, ph2word=ph2word, graph_lst=graph_lst, etypes_lst=etypes_lst) + if global_step < self.hparams['posterior_start_steps']: + z = torch.randn_like(z) + x_recon = self.fvae.decoder(z, nonpadding=tgt_nonpadding_BHT, cond=x).transpose(1, 2) + ret['pre_mel_out'] = x_recon + return x_recon + + def forward_dur(self, dur_input, mel2word, ret, **kwargs): + """ + + :param dur_input: [B, T_txt, H] + :param mel2ph: [B, T_mel] + :param txt_tokens: [B, T_txt] + :param ret: + :return: + """ + word_len = kwargs['word_len'] + ph2word = kwargs['ph2word'] + graph_lst = kwargs['graph_lst'] + etypes_lst = kwargs['etypes_lst'] + src_padding = dur_input.data.abs().sum(-1) == 0 + dur_input = dur_input.detach() + self.hparams['predictor_grad'] * (dur_input - dur_input.detach()) + dur = self.dur_predictor(dur_input, src_padding, ph2word, graph_lst, etypes_lst) + + B, T_ph = ph2word.shape + dur = torch.zeros([B, word_len.max() + 1]).to(ph2word.device).scatter_add(1, ph2word, dur) + dur = dur[:, 1:] + ret['dur'] = dur + if mel2word is None: + mel2word = self.length_regulator(dur).detach() + return mel2word + + def get_pos_embed(self, word2word, x2word): + x_pos = build_word_mask(word2word, x2word).float() # [B, T_word, T_ph] + x_pos = (x_pos.cumsum(-1) / x_pos.sum(-1).clamp(min=1)[..., None] * x_pos).sum(1) + x_pos = self.sin_pos(x_pos.float()) # [B, T_ph, H] + return x_pos + + def store_inverse_all(self): + def remove_weight_norm(m): + try: + if hasattr(m, 'store_inverse'): + m.store_inverse() + nn.utils.remove_weight_norm(m) + except ValueError: # this module didn't have weight norm + return + + self.apply(remove_weight_norm) diff --git a/text_to_speech/modules/vocoder/hifigan/__pycache__/hifigan.cpython-38.pyc b/text_to_speech/modules/vocoder/hifigan/__pycache__/hifigan.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..62fbcb42b76f276fca9ce59f1121d9764653c417 Binary files /dev/null and b/text_to_speech/modules/vocoder/hifigan/__pycache__/hifigan.cpython-38.pyc differ diff --git a/text_to_speech/modules/vocoder/hifigan/hifigan.py b/text_to_speech/modules/vocoder/hifigan/hifigan.py new file mode 100644 index 
0000000000000000000000000000000000000000..b0d776618c6c11feb4d136e8bed4a210cc3f708e --- /dev/null +++ b/text_to_speech/modules/vocoder/hifigan/hifigan.py @@ -0,0 +1,338 @@ +import torch +import torch.nn.functional as F +import torch.nn as nn +from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d +from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm +import numpy as np + +LRELU_SLOPE = 0.1 + + +def init_weights(m, mean=0.0, std=0.01): + classname = m.__class__.__name__ + if classname.find("Conv") != -1: + m.weight.data.normal_(mean, std) + + +def apply_weight_norm(m): + classname = m.__class__.__name__ + if classname.find("Conv") != -1: + weight_norm(m) + + +def get_padding(kernel_size, dilation=1): + return int((kernel_size * dilation - dilation) / 2) + + +class ResBlock1(torch.nn.Module): + def __init__(self, h, channels, kernel_size=3, dilation=(1, 3, 5)): + super(ResBlock1, self).__init__() + self.h = h + self.convs1 = nn.ModuleList([ + weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], + padding=get_padding(kernel_size, dilation[0]))), + weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], + padding=get_padding(kernel_size, dilation[1]))), + weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2], + padding=get_padding(kernel_size, dilation[2]))) + ]) + self.convs1.apply(init_weights) + + self.convs2 = nn.ModuleList([ + weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, + padding=get_padding(kernel_size, 1))), + weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, + padding=get_padding(kernel_size, 1))), + weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, + padding=get_padding(kernel_size, 1))) + ]) + self.convs2.apply(init_weights) + + def forward(self, x): + for c1, c2 in zip(self.convs1, self.convs2): + xt = F.leaky_relu(x, LRELU_SLOPE) + xt = c1(xt) + xt = F.leaky_relu(xt, LRELU_SLOPE) + xt = c2(xt) + x = xt + x + return x + + def remove_weight_norm(self): + for l in self.convs1: + remove_weight_norm(l) + for l in self.convs2: + remove_weight_norm(l) + + +class ResBlock2(torch.nn.Module): + def __init__(self, h, channels, kernel_size=3, dilation=(1, 3)): + super(ResBlock2, self).__init__() + self.h = h + self.convs = nn.ModuleList([ + weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], + padding=get_padding(kernel_size, dilation[0]))), + weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], + padding=get_padding(kernel_size, dilation[1]))) + ]) + self.convs.apply(init_weights) + + def forward(self, x): + for c in self.convs: + xt = F.leaky_relu(x, LRELU_SLOPE) + xt = c(xt) + x = xt + x + return x + + def remove_weight_norm(self): + for l in self.convs: + remove_weight_norm(l) + + +class Conv1d1x1(Conv1d): + """1x1 Conv1d with customized initialization.""" + + def __init__(self, in_channels, out_channels, bias): + """Initialize 1x1 Conv1d module.""" + super(Conv1d1x1, self).__init__(in_channels, out_channels, + kernel_size=1, padding=0, + dilation=1, bias=bias) + + +class HifiGanGenerator(torch.nn.Module): + def __init__(self, h, c_out=1): + super(HifiGanGenerator, self).__init__() + self.h = h + self.num_kernels = len(h['resblock_kernel_sizes']) + self.num_upsamples = len(h['upsample_rates']) + + self.conv_pre = weight_norm(Conv1d(80, h['upsample_initial_channel'], 7, 1, padding=3)) + resblock = ResBlock1 if h['resblock'] == '1' else ResBlock2 + + self.ups = nn.ModuleList() + for i, (u, k) 
in enumerate(zip(h['upsample_rates'], h['upsample_kernel_sizes'])): + c_cur = h['upsample_initial_channel'] // (2 ** (i + 1)) + self.ups.append(weight_norm( + ConvTranspose1d(c_cur * 2, c_cur, k, u, padding=(k - u) // 2))) + self.resblocks = nn.ModuleList() + for i in range(len(self.ups)): + ch = h['upsample_initial_channel'] // (2 ** (i + 1)) + for j, (k, d) in enumerate(zip(h['resblock_kernel_sizes'], h['resblock_dilation_sizes'])): + self.resblocks.append(resblock(h, ch, k, d)) + + self.conv_post = weight_norm(Conv1d(ch, c_out, 7, 1, padding=3)) + self.ups.apply(init_weights) + self.conv_post.apply(init_weights) + + def forward(self, x, f0=None): + x = self.conv_pre(x) + for i in range(self.num_upsamples): + x = F.leaky_relu(x, LRELU_SLOPE) + x = self.ups[i](x) + xs = None + for j in range(self.num_kernels): + if xs is None: + xs = self.resblocks[i * self.num_kernels + j](x) + else: + xs += self.resblocks[i * self.num_kernels + j](x) + x = xs / self.num_kernels + x = F.leaky_relu(x) + x = self.conv_post(x) + x = torch.tanh(x) + + return x + + def remove_weight_norm(self): + print('Removing weight norm...') + for l in self.ups: + remove_weight_norm(l) + for l in self.resblocks: + l.remove_weight_norm() + remove_weight_norm(self.conv_pre) + remove_weight_norm(self.conv_post) + + +class DiscriminatorP(torch.nn.Module): + def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False, use_cond=False, c_in=1): + super(DiscriminatorP, self).__init__() + self.use_cond = use_cond + if use_cond: + from text_to_speech.utils.commons.hparams import hparams + t = hparams['hop_size'] + self.cond_net = torch.nn.ConvTranspose1d(80, 1, t * 2, stride=t, padding=t // 2) + c_in = 2 + + self.period = period + norm_f = weight_norm if use_spectral_norm == False else spectral_norm + self.convs = nn.ModuleList([ + norm_f(Conv2d(c_in, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))), + norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))), + norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))), + norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))), + norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(2, 0))), + ]) + self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) + + def forward(self, x, mel): + fmap = [] + if self.use_cond: + x_mel = self.cond_net(mel) + x = torch.cat([x_mel, x], 1) + # 1d to 2d + b, c, t = x.shape + if t % self.period != 0: # pad first + n_pad = self.period - (t % self.period) + x = F.pad(x, (0, n_pad), "reflect") + t = t + n_pad + x = x.view(b, c, t // self.period, self.period) + + for l in self.convs: + x = l(x) + x = F.leaky_relu(x, LRELU_SLOPE) + fmap.append(x) + x = self.conv_post(x) + fmap.append(x) + x = torch.flatten(x, 1, -1) + + return x, fmap + + +class MultiPeriodDiscriminator(torch.nn.Module): + def __init__(self, use_cond=False, c_in=1): + super(MultiPeriodDiscriminator, self).__init__() + self.discriminators = nn.ModuleList([ + DiscriminatorP(2, use_cond=use_cond, c_in=c_in), + DiscriminatorP(3, use_cond=use_cond, c_in=c_in), + DiscriminatorP(5, use_cond=use_cond, c_in=c_in), + DiscriminatorP(7, use_cond=use_cond, c_in=c_in), + DiscriminatorP(11, use_cond=use_cond, c_in=c_in), + ]) + + def forward(self, y, y_hat, mel=None): + y_d_rs = [] + y_d_gs = [] + fmap_rs = [] + fmap_gs = [] + for i, d in enumerate(self.discriminators): + y_d_r, fmap_r = d(y, mel) + y_d_g, fmap_g = d(y_hat, mel) + y_d_rs.append(y_d_r) + 
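Each DiscriminatorP above folds the 1-D waveform into a [frames x period] grid before its Conv2d stack, so every kernel column sees samples that are exactly `period` steps apart; reflect padding first makes the length divisible by the period. A minimal standalone sketch of that fold with toy sizes:

import torch
import torch.nn.functional as F

period = 3
x = torch.randn(1, 1, 10)                 # [B, C, T]
b, c, t = x.shape
if t % period != 0:                       # pad so T becomes a multiple of the period
    n_pad = period - (t % period)
    x = F.pad(x, (0, n_pad), "reflect")
    t = t + n_pad
x2d = x.view(b, c, t // period, period)   # [B, C, T//period, period]
print(x2d.shape)                          # torch.Size([1, 1, 4, 3])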
fmap_rs.append(fmap_r) + y_d_gs.append(y_d_g) + fmap_gs.append(fmap_g) + + return y_d_rs, y_d_gs, fmap_rs, fmap_gs + + +class DiscriminatorS(torch.nn.Module): + def __init__(self, use_spectral_norm=False, use_cond=False, upsample_rates=None, c_in=1): + super(DiscriminatorS, self).__init__() + self.use_cond = use_cond + if use_cond: + t = np.prod(upsample_rates) + self.cond_net = torch.nn.ConvTranspose1d(80, 1, t * 2, stride=t, padding=t // 2) + c_in = 2 + norm_f = weight_norm if use_spectral_norm == False else spectral_norm + self.convs = nn.ModuleList([ + norm_f(Conv1d(c_in, 128, 15, 1, padding=7)), + norm_f(Conv1d(128, 128, 41, 2, groups=4, padding=20)), + norm_f(Conv1d(128, 256, 41, 2, groups=16, padding=20)), + norm_f(Conv1d(256, 512, 41, 4, groups=16, padding=20)), + norm_f(Conv1d(512, 1024, 41, 4, groups=16, padding=20)), + norm_f(Conv1d(1024, 1024, 41, 1, groups=16, padding=20)), + norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), + ]) + self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) + + def forward(self, x, mel): + if self.use_cond: + x_mel = self.cond_net(mel) + x = torch.cat([x_mel, x], 1) + fmap = [] + for l in self.convs: + x = l(x) + x = F.leaky_relu(x, LRELU_SLOPE) + fmap.append(x) + x = self.conv_post(x) + fmap.append(x) + x = torch.flatten(x, 1, -1) + + return x, fmap + + +class MultiScaleDiscriminator(torch.nn.Module): + def __init__(self, use_cond=False, c_in=1): + super(MultiScaleDiscriminator, self).__init__() + from text_to_speech.utils.commons.hparams import hparams + self.discriminators = nn.ModuleList([ + DiscriminatorS(use_spectral_norm=True, use_cond=use_cond, + upsample_rates=[4, 4, hparams['hop_size'] // 16], + c_in=c_in), + DiscriminatorS(use_cond=use_cond, + upsample_rates=[4, 4, hparams['hop_size'] // 32], + c_in=c_in), + DiscriminatorS(use_cond=use_cond, + upsample_rates=[4, 4, hparams['hop_size'] // 64], + c_in=c_in), + ]) + self.meanpools = nn.ModuleList([ + AvgPool1d(4, 2, padding=1), + AvgPool1d(4, 2, padding=1) + ]) + + def forward(self, y, y_hat, mel=None): + y_d_rs = [] + y_d_gs = [] + fmap_rs = [] + fmap_gs = [] + for i, d in enumerate(self.discriminators): + if i != 0: + y = self.meanpools[i - 1](y) + y_hat = self.meanpools[i - 1](y_hat) + y_d_r, fmap_r = d(y, mel) + y_d_g, fmap_g = d(y_hat, mel) + y_d_rs.append(y_d_r) + fmap_rs.append(fmap_r) + y_d_gs.append(y_d_g) + fmap_gs.append(fmap_g) + + return y_d_rs, y_d_gs, fmap_rs, fmap_gs + + +def feature_loss(fmap_r, fmap_g): + loss = 0 + for dr, dg in zip(fmap_r, fmap_g): + for rl, gl in zip(dr, dg): + loss += torch.mean(torch.abs(rl - gl)) + + return loss * 2 + + +def discriminator_loss(disc_real_outputs, disc_generated_outputs): + r_losses = 0 + g_losses = 0 + for dr, dg in zip(disc_real_outputs, disc_generated_outputs): + r_loss = torch.mean((1 - dr) ** 2) + g_loss = torch.mean(dg ** 2) + r_losses += r_loss + g_losses += g_loss + r_losses = r_losses / len(disc_real_outputs) + g_losses = g_losses / len(disc_real_outputs) + return r_losses, g_losses + + +def cond_discriminator_loss(outputs): + loss = 0 + for dg in outputs: + g_loss = torch.mean(dg ** 2) + loss += g_loss + loss = loss / len(outputs) + return loss + + +def generator_loss(disc_outputs): + loss = 0 + for dg in disc_outputs: + l = torch.mean((1 - dg) ** 2) + loss += l + loss = loss / len(disc_outputs) + return loss diff --git a/text_to_speech/modules/vocoder/hifigan/mel_utils.py b/text_to_speech/modules/vocoder/hifigan/mel_utils.py new file mode 100644 index 
0000000000000000000000000000000000000000..a75fce72db54812320bc60aedfdd378ccecb3374 --- /dev/null +++ b/text_to_speech/modules/vocoder/hifigan/mel_utils.py @@ -0,0 +1,80 @@ +import numpy as np +import torch +import torch.utils.data +from librosa.filters import mel as librosa_mel_fn +from scipy.io.wavfile import read + +MAX_WAV_VALUE = 32768.0 + + +def load_wav(full_path): + sampling_rate, data = read(full_path) + return data, sampling_rate + + +def dynamic_range_compression(x, C=1, clip_val=1e-5): + return np.log(np.clip(x, a_min=clip_val, a_max=None) * C) + + +def dynamic_range_decompression(x, C=1): + return np.exp(x) / C + + +def dynamic_range_compression_torch(x, C=1, clip_val=1e-5): + return torch.log(torch.clamp(x, min=clip_val) * C) + + +def dynamic_range_decompression_torch(x, C=1): + return torch.exp(x) / C + + +def spectral_normalize_torch(magnitudes): + output = dynamic_range_compression_torch(magnitudes) + return output + + +def spectral_de_normalize_torch(magnitudes): + output = dynamic_range_decompression_torch(magnitudes) + return output + + +mel_basis = {} +hann_window = {} + + +def mel_spectrogram(y, hparams, center=False, complex=False): + # hop_size: 512 # For 22050Hz, 275 ~= 12.5 ms (0.0125 * sample_rate) + # win_size: 2048 # For 22050Hz, 1100 ~= 50 ms (If None, win_size: fft_size) (0.05 * sample_rate) + # fmin: 55 # Set this to 55 if your speaker is male! if female, 95 should help taking off noise. (To test depending on dataset. Pitch info: male~[65, 260], female~[100, 525]) + # fmax: 10000 # To be increased/reduced depending on data. + # fft_size: 2048 # Extra window size is filled with 0 paddings to match this parameter + # n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, + n_fft = hparams['fft_size'] + num_mels = hparams['audio_num_mel_bins'] + sampling_rate = hparams['audio_sample_rate'] + hop_size = hparams['hop_size'] + win_size = hparams['win_size'] + fmin = hparams['fmin'] + fmax = hparams['fmax'] + y = y.clamp(min=-1., max=1.) + global mel_basis, hann_window + if fmax not in mel_basis: + mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax) + mel_basis[str(fmax) + '_' + str(y.device)] = torch.from_numpy(mel).float().to(y.device) + hann_window[str(y.device)] = torch.hann_window(win_size).to(y.device) + + y = torch.nn.functional.pad(y.unsqueeze(1), [int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)], + mode='reflect') + y = y.squeeze(1) + + spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[str(y.device)], + center=center, pad_mode='reflect', normalized=False, onesided=True) + + if not complex: + spec = torch.sqrt(spec.pow(2).sum(-1) + (1e-9)) + spec = torch.matmul(mel_basis[str(fmax) + '_' + str(y.device)], spec) + spec = spectral_normalize_torch(spec) + else: + B, C, T, _ = spec.shape + spec = spec.transpose(1, 2) # [B, T, n_fft, 2] + return spec diff --git a/text_to_speech/modules/vocoder/hifigan/stft_loss.py b/text_to_speech/modules/vocoder/hifigan/stft_loss.py new file mode 100644 index 0000000000000000000000000000000000000000..e47447455341e5725d6f82ded66dc08b5d2b1cc5 --- /dev/null +++ b/text_to_speech/modules/vocoder/hifigan/stft_loss.py @@ -0,0 +1,136 @@ +# -*- coding: utf-8 -*- + +# Copyright 2019 Tomoki Hayashi +# MIT License (https://opensource.org/licenses/MIT) + +"""STFT-based Loss modules.""" + +import torch +import torch.nn.functional as F + + +def stft(x, fft_size, hop_size, win_length, window): + """Perform STFT and convert to magnitude spectrogram. 
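mel_spectrogram above takes a [B, T] waveform plus an hparams dict carrying the STFT settings and returns a log-compressed mel spectrogram. A usage sketch; the values below are typical 22050 Hz settings chosen for illustration rather than taken from a specific config, and the call assumes versions where librosa.filters.mel accepts positional arguments and torch.stft may be called without return_complex, as written above:

import torch

hparams = {'fft_size': 1024, 'audio_num_mel_bins': 80, 'audio_sample_rate': 22050,
           'hop_size': 256, 'win_size': 1024, 'fmin': 80, 'fmax': 7600}
wav = torch.randn(1, 22050)            # one second of audio, [B, T] (clamped to [-1, 1] inside)
mel = mel_spectrogram(wav, hparams)    # [B, 80, n_frames], log-compressed mel
print(mel.shape)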
+ Args: + x (Tensor): Input signal tensor (B, T). + fft_size (int): FFT size. + hop_size (int): Hop size. + win_length (int): Window length. + window (str): Window function type. + Returns: + Tensor: Magnitude spectrogram (B, #frames, fft_size // 2 + 1). + """ + x_stft = torch.stft(x, fft_size, hop_size, win_length, window) + real = x_stft[..., 0] + imag = x_stft[..., 1] + + # NOTE(kan-bayashi): clamp is needed to avoid nan or inf + return torch.sqrt(torch.clamp(real ** 2 + imag ** 2, min=1e-7)).transpose(2, 1) + + +class SpectralConvergengeLoss(torch.nn.Module): + """Spectral convergence loss module.""" + + def __init__(self): + """Initilize spectral convergence loss module.""" + super(SpectralConvergengeLoss, self).__init__() + + def forward(self, x_mag, y_mag): + """Calculate forward propagation. + Args: + x_mag (Tensor): Magnitude spectrogram of predicted signal (B, #frames, #freq_bins). + y_mag (Tensor): Magnitude spectrogram of groundtruth signal (B, #frames, #freq_bins). + Returns: + Tensor: Spectral convergence loss value. + """ + return torch.norm(y_mag - x_mag, p="fro") / torch.norm(y_mag, p="fro") + + +class LogSTFTMagnitudeLoss(torch.nn.Module): + """Log STFT magnitude loss module.""" + + def __init__(self): + """Initilize los STFT magnitude loss module.""" + super(LogSTFTMagnitudeLoss, self).__init__() + + def forward(self, x_mag, y_mag): + """Calculate forward propagation. + Args: + x_mag (Tensor): Magnitude spectrogram of predicted signal (B, #frames, #freq_bins). + y_mag (Tensor): Magnitude spectrogram of groundtruth signal (B, #frames, #freq_bins). + Returns: + Tensor: Log STFT magnitude loss value. + """ + return F.l1_loss(torch.log(y_mag), torch.log(x_mag)) + + +class STFTLoss(torch.nn.Module): + """STFT loss module.""" + + def __init__(self, fft_size=1024, shift_size=120, win_length=600, window="hann_window"): + """Initialize STFT loss module.""" + super(STFTLoss, self).__init__() + self.fft_size = fft_size + self.shift_size = shift_size + self.win_length = win_length + self.window = getattr(torch, window)(win_length) + self.spectral_convergenge_loss = SpectralConvergengeLoss() + self.log_stft_magnitude_loss = LogSTFTMagnitudeLoss() + + def forward(self, x, y): + """Calculate forward propagation. + Args: + x (Tensor): Predicted signal (B, T). + y (Tensor): Groundtruth signal (B, T). + Returns: + Tensor: Spectral convergence loss value. + Tensor: Log STFT magnitude loss value. + """ + x_mag = stft(x, self.fft_size, self.shift_size, self.win_length, self.window.to(x.get_device())) + y_mag = stft(y, self.fft_size, self.shift_size, self.win_length, self.window.to(x.get_device())) + sc_loss = self.spectral_convergenge_loss(x_mag, y_mag) + mag_loss = self.log_stft_magnitude_loss(x_mag, y_mag) + + return sc_loss, mag_loss + + +class MultiResolutionSTFTLoss(torch.nn.Module): + """Multi resolution STFT loss module.""" + + def __init__(self, + fft_sizes=[1024, 2048, 512], + hop_sizes=[120, 240, 50], + win_lengths=[600, 1200, 240], + window="hann_window"): + """Initialize Multi resolution STFT loss module. + Args: + fft_sizes (list): List of FFT sizes. + hop_sizes (list): List of hop sizes. + win_lengths (list): List of window lengths. + window (str): Window function type. 
+ """ + super(MultiResolutionSTFTLoss, self).__init__() + assert len(fft_sizes) == len(hop_sizes) == len(win_lengths) + self.stft_losses = torch.nn.ModuleList() + for fs, ss, wl in zip(fft_sizes, hop_sizes, win_lengths): + self.stft_losses += [STFTLoss(fs, ss, wl, window)] + + def forward(self, x, y): + """Calculate forward propagation. + Args: + x (Tensor): Predicted signal (B, T). + y (Tensor): Groundtruth signal (B, T). + Returns: + Tensor: Multi resolution spectral convergence loss value. + Tensor: Multi resolution log STFT magnitude loss value. + """ + sc_loss = 0.0 + mag_loss = 0.0 + for f in self.stft_losses: + sc_l, mag_l = f(x, y) + sc_loss += sc_l + mag_loss += mag_l + sc_loss /= len(self.stft_losses) + mag_loss /= len(self.stft_losses) + + return sc_loss, mag_loss \ No newline at end of file diff --git a/text_to_speech/modules/vocoder/parallel_wavegan/__init__.py b/text_to_speech/modules/vocoder/parallel_wavegan/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/text_to_speech/modules/vocoder/parallel_wavegan/layers/__init__.py b/text_to_speech/modules/vocoder/parallel_wavegan/layers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2fc2a9e79504bdc90dafd6f07f28d39c5004753e --- /dev/null +++ b/text_to_speech/modules/vocoder/parallel_wavegan/layers/__init__.py @@ -0,0 +1,5 @@ +from .causal_conv import * # NOQA +from .pqmf import * # NOQA +from .residual_block import * # NOQA +from text_to_speech.modules.vocoder.parallel_wavegan.layers.residual_stack import * # NOQA +from .upsample import * # NOQA diff --git a/text_to_speech/modules/vocoder/parallel_wavegan/layers/causal_conv.py b/text_to_speech/modules/vocoder/parallel_wavegan/layers/causal_conv.py new file mode 100644 index 0000000000000000000000000000000000000000..fca77daf65f234e6fbe355ed148fc8f0ee85038a --- /dev/null +++ b/text_to_speech/modules/vocoder/parallel_wavegan/layers/causal_conv.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Tomoki Hayashi +# MIT License (https://opensource.org/licenses/MIT) + +"""Causal convolusion layer modules.""" + + +import torch + + +class CausalConv1d(torch.nn.Module): + """CausalConv1d module with customized initialization.""" + + def __init__(self, in_channels, out_channels, kernel_size, + dilation=1, bias=True, pad="ConstantPad1d", pad_params={"value": 0.0}): + """Initialize CausalConv1d module.""" + super(CausalConv1d, self).__init__() + self.pad = getattr(torch.nn, pad)((kernel_size - 1) * dilation, **pad_params) + self.conv = torch.nn.Conv1d(in_channels, out_channels, kernel_size, + dilation=dilation, bias=bias) + + def forward(self, x): + """Calculate forward propagation. + + Args: + x (Tensor): Input tensor (B, in_channels, T). + + Returns: + Tensor: Output tensor (B, out_channels, T). + + """ + return self.conv(self.pad(x))[:, :, :x.size(2)] + + +class CausalConvTranspose1d(torch.nn.Module): + """CausalConvTranspose1d module with customized initialization.""" + + def __init__(self, in_channels, out_channels, kernel_size, stride, bias=True): + """Initialize CausalConvTranspose1d module.""" + super(CausalConvTranspose1d, self).__init__() + self.deconv = torch.nn.ConvTranspose1d( + in_channels, out_channels, kernel_size, stride, bias=bias) + self.stride = stride + + def forward(self, x): + """Calculate forward propagation. + + Args: + x (Tensor): Input tensor (B, in_channels, T_in). + + Returns: + Tensor: Output tensor (B, out_channels, T_out). 
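MultiResolutionSTFTLoss above sums a spectral-convergence term and a log-magnitude term over three STFT resolutions. A usage sketch; note that this copy moves its Hann window with x.get_device(), so it expects CUDA inputs (the near-duplicate under parallel_wavegan/losses later in this patch uses x.device and also runs on CPU), and the same torch.stft version caveat as above applies:

import torch

criterion = MultiResolutionSTFTLoss()          # default FFT sizes 1024 / 2048 / 512
y_hat = torch.randn(2, 16000, device='cuda')   # predicted waveform, [B, T]
y = torch.randn(2, 16000, device='cuda')       # ground-truth waveform, [B, T]
sc_loss, mag_loss = criterion(y_hat, y)
loss = sc_loss + mag_loss                      # auxiliary reconstruction loss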
+ + """ + return self.deconv(x)[:, :, :-self.stride] diff --git a/text_to_speech/modules/vocoder/parallel_wavegan/layers/pqmf.py b/text_to_speech/modules/vocoder/parallel_wavegan/layers/pqmf.py new file mode 100644 index 0000000000000000000000000000000000000000..bb31c430d2abe0219f58f153f69d836383e095ef --- /dev/null +++ b/text_to_speech/modules/vocoder/parallel_wavegan/layers/pqmf.py @@ -0,0 +1,132 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Tomoki Hayashi +# MIT License (https://opensource.org/licenses/MIT) + +"""Pseudo QMF modules.""" + +import numpy as np +import torch +import torch.nn.functional as F + +from scipy.signal import kaiser + + +def design_prototype_filter(taps=62, cutoff_ratio=0.15, beta=9.0): + """Design prototype filter for PQMF. + + This method is based on `A Kaiser window approach for the design of prototype + filters of cosine modulated filterbanks`_. + + Args: + taps (int): The number of filter taps. + cutoff_ratio (float): Cut-off frequency ratio. + beta (float): Beta coefficient for kaiser window. + + Returns: + ndarray: Impluse response of prototype filter (taps + 1,). + + .. _`A Kaiser window approach for the design of prototype filters of cosine modulated filterbanks`: + https://ieeexplore.ieee.org/abstract/document/681427 + + """ + # check the arguments are valid + assert taps % 2 == 0, "The number of taps mush be even number." + assert 0.0 < cutoff_ratio < 1.0, "Cutoff ratio must be > 0.0 and < 1.0." + + # make initial filter + omega_c = np.pi * cutoff_ratio + with np.errstate(invalid='ignore'): + h_i = np.sin(omega_c * (np.arange(taps + 1) - 0.5 * taps)) \ + / (np.pi * (np.arange(taps + 1) - 0.5 * taps)) + h_i[taps // 2] = np.cos(0) * cutoff_ratio # fix nan due to indeterminate form + + # apply kaiser window + w = kaiser(taps + 1, beta) + h = h_i * w + + return h + + +class PQMF(torch.nn.Module): + """PQMF module. + + This module is based on `Near-perfect-reconstruction pseudo-QMF banks`_. + + .. _`Near-perfect-reconstruction pseudo-QMF banks`: + https://ieeexplore.ieee.org/document/258122 + + """ + + def __init__(self, subbands=4, taps=62, cutoff_ratio=0.15, beta=9.0): + """Initilize PQMF module. + + Args: + subbands (int): The number of subbands. + taps (int): The number of filter taps. + cutoff_ratio (float): Cut-off frequency ratio. + beta (float): Beta coefficient for kaiser window. 
+ + """ + super(PQMF, self).__init__() + + # define filter coefficient + h_proto = design_prototype_filter(taps, cutoff_ratio, beta) + h_analysis = np.zeros((subbands, len(h_proto))) + h_synthesis = np.zeros((subbands, len(h_proto))) + for k in range(subbands): + h_analysis[k] = 2 * h_proto * np.cos( + (2 * k + 1) * (np.pi / (2 * subbands)) * + (np.arange(taps + 1) - ((taps - 1) / 2)) + + (-1) ** k * np.pi / 4) + h_synthesis[k] = 2 * h_proto * np.cos( + (2 * k + 1) * (np.pi / (2 * subbands)) * + (np.arange(taps + 1) - ((taps - 1) / 2)) - + (-1) ** k * np.pi / 4) + + # convert to tensor + analysis_filter = torch.from_numpy(h_analysis).float().unsqueeze(1) + synthesis_filter = torch.from_numpy(h_synthesis).float().unsqueeze(0) + + # register coefficients as beffer + self.register_buffer("analysis_filter", analysis_filter) + self.register_buffer("synthesis_filter", synthesis_filter) + + # filter for downsampling & upsampling + updown_filter = torch.zeros((subbands, subbands, subbands)).float() + for k in range(subbands): + updown_filter[k, k, 0] = 1.0 + self.register_buffer("updown_filter", updown_filter) + self.subbands = subbands + + # keep padding info + self.pad_fn = torch.nn.ConstantPad1d(taps // 2, 0.0) + + def analysis(self, x): + """Analysis with PQMF. + + Args: + x (Tensor): Input tensor (B, 1, T). + + Returns: + Tensor: Output tensor (B, subbands, T // subbands). + + """ + x = F.conv1d(self.pad_fn(x), self.analysis_filter) + return F.conv1d(x, self.updown_filter, stride=self.subbands) + + def synthesis(self, x): + """Synthesis with PQMF. + + Args: + x (Tensor): Input tensor (B, subbands, T // subbands). + + Returns: + Tensor: Output tensor (B, 1, T). + + """ + # NOTE(kan-bayashi): Power will be dreased so here multipy by # subbands. + # Not sure this is the correct way, it is better to check again. + # TODO(kan-bayashi): Understand the reconstruction procedure + x = F.conv_transpose1d(x, self.updown_filter * self.subbands, stride=self.subbands) + return F.conv1d(self.pad_fn(x), self.synthesis_filter) diff --git a/text_to_speech/modules/vocoder/parallel_wavegan/layers/residual_block.py b/text_to_speech/modules/vocoder/parallel_wavegan/layers/residual_block.py new file mode 100644 index 0000000000000000000000000000000000000000..7a267a86c1fa521c2824addf9dda304c43f1ff1f --- /dev/null +++ b/text_to_speech/modules/vocoder/parallel_wavegan/layers/residual_block.py @@ -0,0 +1,129 @@ +# -*- coding: utf-8 -*- + +"""Residual block module in WaveNet. + +This code is modified from https://github.com/r9y9/wavenet_vocoder. 
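PQMF above splits a waveform into cosine-modulated subbands and can resynthesize it near-perfectly. A usage sketch (assumes a scipy version that still provides scipy.signal.kaiser, matching the import above):

import torch

pqmf = PQMF(subbands=4)
wav = torch.randn(1, 1, 8192)      # [B, 1, T]
sub = pqmf.analysis(wav)           # [B, 4, T // 4] subband signals
rec = pqmf.synthesis(sub)          # [B, 1, T] near-perfect reconstruction
print(sub.shape, rec.shape)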
+ +""" + +import math + +import torch +import torch.nn.functional as F + + +class Conv1d(torch.nn.Conv1d): + """Conv1d module with customized initialization.""" + + def __init__(self, *args, **kwargs): + """Initialize Conv1d module.""" + super(Conv1d, self).__init__(*args, **kwargs) + + def reset_parameters(self): + """Reset parameters.""" + torch.nn.init.kaiming_normal_(self.weight, nonlinearity="relu") + if self.bias is not None: + torch.nn.init.constant_(self.bias, 0.0) + + +class Conv1d1x1(Conv1d): + """1x1 Conv1d with customized initialization.""" + + def __init__(self, in_channels, out_channels, bias): + """Initialize 1x1 Conv1d module.""" + super(Conv1d1x1, self).__init__(in_channels, out_channels, + kernel_size=1, padding=0, + dilation=1, bias=bias) + + +class ResidualBlock(torch.nn.Module): + """Residual block module in WaveNet.""" + + def __init__(self, + kernel_size=3, + residual_channels=64, + gate_channels=128, + skip_channels=64, + aux_channels=80, + dropout=0.0, + dilation=1, + bias=True, + use_causal_conv=False + ): + """Initialize ResidualBlock module. + + Args: + kernel_size (int): Kernel size of dilation convolution layer. + residual_channels (int): Number of channels for residual connection. + skip_channels (int): Number of channels for skip connection. + aux_channels (int): Local conditioning channels i.e. auxiliary input dimension. + dropout (float): Dropout probability. + dilation (int): Dilation factor. + bias (bool): Whether to add bias parameter in convolution layers. + use_causal_conv (bool): Whether to use use_causal_conv or non-use_causal_conv convolution. + + """ + super(ResidualBlock, self).__init__() + self.dropout = dropout + # no future time stamps available + if use_causal_conv: + padding = (kernel_size - 1) * dilation + else: + assert (kernel_size - 1) % 2 == 0, "Not support even number kernel size." + padding = (kernel_size - 1) // 2 * dilation + self.use_causal_conv = use_causal_conv + + # dilation conv + self.conv = Conv1d(residual_channels, gate_channels, kernel_size, + padding=padding, dilation=dilation, bias=bias) + + # local conditioning + if aux_channels > 0: + self.conv1x1_aux = Conv1d1x1(aux_channels, gate_channels, bias=False) + else: + self.conv1x1_aux = None + + # conv output is split into two groups + gate_out_channels = gate_channels // 2 + self.conv1x1_out = Conv1d1x1(gate_out_channels, residual_channels, bias=bias) + self.conv1x1_skip = Conv1d1x1(gate_out_channels, skip_channels, bias=bias) + + def forward(self, x, c): + """Calculate forward propagation. + + Args: + x (Tensor): Input tensor (B, residual_channels, T). + c (Tensor): Local conditioning auxiliary tensor (B, aux_channels, T). + + Returns: + Tensor: Output tensor for residual connection (B, residual_channels, T). + Tensor: Output tensor for skip connection (B, skip_channels, T). 
+ + """ + residual = x + x = F.dropout(x, p=self.dropout, training=self.training) + x = self.conv(x) + + # remove future time steps if use_causal_conv conv + x = x[:, :, :residual.size(-1)] if self.use_causal_conv else x + + # split into two part for gated activation + splitdim = 1 + xa, xb = x.split(x.size(splitdim) // 2, dim=splitdim) + + # local conditioning + if c is not None: + assert self.conv1x1_aux is not None + c = self.conv1x1_aux(c) + ca, cb = c.split(c.size(splitdim) // 2, dim=splitdim) + xa, xb = xa + ca, xb + cb + + x = torch.tanh(xa) * torch.sigmoid(xb) + + # for skip connection + s = self.conv1x1_skip(x) + + # for residual connection + x = (self.conv1x1_out(x) + residual) * math.sqrt(0.5) + + return x, s diff --git a/text_to_speech/modules/vocoder/parallel_wavegan/layers/residual_stack.py b/text_to_speech/modules/vocoder/parallel_wavegan/layers/residual_stack.py new file mode 100644 index 0000000000000000000000000000000000000000..6e07c8803ad348dd923f6b7c0f7aff14aab9cf78 --- /dev/null +++ b/text_to_speech/modules/vocoder/parallel_wavegan/layers/residual_stack.py @@ -0,0 +1,75 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Tomoki Hayashi +# MIT License (https://opensource.org/licenses/MIT) + +"""Residual stack module in MelGAN.""" + +import torch + +from . import CausalConv1d + + +class ResidualStack(torch.nn.Module): + """Residual stack module introduced in MelGAN.""" + + def __init__(self, + kernel_size=3, + channels=32, + dilation=1, + bias=True, + nonlinear_activation="LeakyReLU", + nonlinear_activation_params={"negative_slope": 0.2}, + pad="ReflectionPad1d", + pad_params={}, + use_causal_conv=False, + ): + """Initialize ResidualStack module. + + Args: + kernel_size (int): Kernel size of dilation convolution layer. + channels (int): Number of channels of convolution layers. + dilation (int): Dilation factor. + bias (bool): Whether to add bias parameter in convolution layers. + nonlinear_activation (str): Activation function module name. + nonlinear_activation_params (dict): Hyperparameters for activation function. + pad (str): Padding function module name before dilated convolution layer. + pad_params (dict): Hyperparameters for padding function. + use_causal_conv (bool): Whether to use causal convolution. + + """ + super(ResidualStack, self).__init__() + + # defile residual stack part + if not use_causal_conv: + assert (kernel_size - 1) % 2 == 0, "Not support even number kernel size." + self.stack = torch.nn.Sequential( + getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params), + getattr(torch.nn, pad)((kernel_size - 1) // 2 * dilation, **pad_params), + torch.nn.Conv1d(channels, channels, kernel_size, dilation=dilation, bias=bias), + getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params), + torch.nn.Conv1d(channels, channels, 1, bias=bias), + ) + else: + self.stack = torch.nn.Sequential( + getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params), + CausalConv1d(channels, channels, kernel_size, dilation=dilation, + bias=bias, pad=pad, pad_params=pad_params), + getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params), + torch.nn.Conv1d(channels, channels, 1, bias=bias), + ) + + # defile extra layer for skip connection + self.skip_layer = torch.nn.Conv1d(channels, channels, 1, bias=bias) + + def forward(self, c): + """Calculate forward propagation. + + Args: + c (Tensor): Input tensor (B, channels, T). + + Returns: + Tensor: Output tensor (B, chennels, T). 
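ResidualBlock above is the WaveNet-style gated unit: the dilated conv output is split into two halves, local conditioning is added to each, and tanh(xa) * sigmoid(xb) gates the result before the 1x1 residual and skip projections. A usage sketch with the default channel sizes:

import torch

block = ResidualBlock(kernel_size=3, residual_channels=64, gate_channels=128,
                      skip_channels=64, aux_channels=80, dilation=2)
x = torch.randn(2, 64, 100)    # [B, residual_channels, T]
c = torch.randn(2, 80, 100)    # local conditioning (e.g. upsampled mel), [B, aux_channels, T]
res, skip = block(x, c)
print(res.shape, skip.shape)   # both [2, 64, 100]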
+ + """ + return self.stack(c) + self.skip_layer(c) diff --git a/text_to_speech/modules/vocoder/parallel_wavegan/layers/tf_layers.py b/text_to_speech/modules/vocoder/parallel_wavegan/layers/tf_layers.py new file mode 100644 index 0000000000000000000000000000000000000000..c0f46bd755c161cda2ac904fe37f3f3c6357a88d --- /dev/null +++ b/text_to_speech/modules/vocoder/parallel_wavegan/layers/tf_layers.py @@ -0,0 +1,129 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 MINH ANH (@dathudeptrai) +# MIT License (https://opensource.org/licenses/MIT) + +"""Tensorflow Layer modules complatible with pytorch.""" + +import tensorflow as tf + + +class TFReflectionPad1d(tf.keras.layers.Layer): + """Tensorflow ReflectionPad1d module.""" + + def __init__(self, padding_size): + """Initialize TFReflectionPad1d module. + + Args: + padding_size (int): Padding size. + + """ + super(TFReflectionPad1d, self).__init__() + self.padding_size = padding_size + + @tf.function + def call(self, x): + """Calculate forward propagation. + + Args: + x (Tensor): Input tensor (B, T, 1, C). + + Returns: + Tensor: Padded tensor (B, T + 2 * padding_size, 1, C). + + """ + return tf.pad(x, [[0, 0], [self.padding_size, self.padding_size], [0, 0], [0, 0]], "REFLECT") + + +class TFConvTranspose1d(tf.keras.layers.Layer): + """Tensorflow ConvTranspose1d module.""" + + def __init__(self, channels, kernel_size, stride, padding): + """Initialize TFConvTranspose1d( module. + + Args: + channels (int): Number of channels. + kernel_size (int): kernel size. + strides (int): Stride width. + padding (str): Padding type ("same" or "valid"). + + """ + super(TFConvTranspose1d, self).__init__() + self.conv1d_transpose = tf.keras.layers.Conv2DTranspose( + filters=channels, + kernel_size=(kernel_size, 1), + strides=(stride, 1), + padding=padding, + ) + + @tf.function + def call(self, x): + """Calculate forward propagation. + + Args: + x (Tensor): Input tensor (B, T, 1, C). + + Returns: + Tensors: Output tensor (B, T', 1, C'). + + """ + x = self.conv1d_transpose(x) + return x + + +class TFResidualStack(tf.keras.layers.Layer): + """Tensorflow ResidualStack module.""" + + def __init__(self, + kernel_size, + channels, + dilation, + bias, + nonlinear_activation, + nonlinear_activation_params, + padding, + ): + """Initialize TFResidualStack module. + + Args: + kernel_size (int): Kernel size. + channles (int): Number of channels. + dilation (int): Dilation ine. + bias (bool): Whether to add bias parameter in convolution layers. + nonlinear_activation (str): Activation function module name. + nonlinear_activation_params (dict): Hyperparameters for activation function. + padding (str): Padding type ("same" or "valid"). + + """ + super(TFResidualStack, self).__init__() + self.block = [ + getattr(tf.keras.layers, nonlinear_activation)(**nonlinear_activation_params), + TFReflectionPad1d(dilation), + tf.keras.layers.Conv2D( + filters=channels, + kernel_size=(kernel_size, 1), + dilation_rate=(dilation, 1), + use_bias=bias, + padding="valid", + ), + getattr(tf.keras.layers, nonlinear_activation)(**nonlinear_activation_params), + tf.keras.layers.Conv2D(filters=channels, kernel_size=1, use_bias=bias) + ] + self.shortcut = tf.keras.layers.Conv2D(filters=channels, kernel_size=1, use_bias=bias) + + @tf.function + def call(self, x): + """Calculate forward propagation. + + Args: + x (Tensor): Input tensor (B, T, 1, C). + + Returns: + Tensor: Output tensor (B, T, 1, C). 
+ + """ + _x = tf.identity(x) + for i, layer in enumerate(self.block): + _x = layer(_x) + shortcut = self.shortcut(x) + return shortcut + _x diff --git a/text_to_speech/modules/vocoder/parallel_wavegan/layers/upsample.py b/text_to_speech/modules/vocoder/parallel_wavegan/layers/upsample.py new file mode 100644 index 0000000000000000000000000000000000000000..18c6397c420a81fadc5320e3a48f3249534decd8 --- /dev/null +++ b/text_to_speech/modules/vocoder/parallel_wavegan/layers/upsample.py @@ -0,0 +1,183 @@ +# -*- coding: utf-8 -*- + +"""Upsampling module. + +This code is modified from https://github.com/r9y9/wavenet_vocoder. + +""" + +import numpy as np +import torch +import torch.nn.functional as F + +from . import Conv1d + + +class Stretch2d(torch.nn.Module): + """Stretch2d module.""" + + def __init__(self, x_scale, y_scale, mode="nearest"): + """Initialize Stretch2d module. + + Args: + x_scale (int): X scaling factor (Time axis in spectrogram). + y_scale (int): Y scaling factor (Frequency axis in spectrogram). + mode (str): Interpolation mode. + + """ + super(Stretch2d, self).__init__() + self.x_scale = x_scale + self.y_scale = y_scale + self.mode = mode + + def forward(self, x): + """Calculate forward propagation. + + Args: + x (Tensor): Input tensor (B, C, F, T). + + Returns: + Tensor: Interpolated tensor (B, C, F * y_scale, T * x_scale), + + """ + return F.interpolate( + x, scale_factor=(self.y_scale, self.x_scale), mode=self.mode) + + +class Conv2d(torch.nn.Conv2d): + """Conv2d module with customized initialization.""" + + def __init__(self, *args, **kwargs): + """Initialize Conv2d module.""" + super(Conv2d, self).__init__(*args, **kwargs) + + def reset_parameters(self): + """Reset parameters.""" + self.weight.data.fill_(1. / np.prod(self.kernel_size)) + if self.bias is not None: + torch.nn.init.constant_(self.bias, 0.0) + + +class UpsampleNetwork(torch.nn.Module): + """Upsampling network module.""" + + def __init__(self, + upsample_scales, + nonlinear_activation=None, + nonlinear_activation_params={}, + interpolate_mode="nearest", + freq_axis_kernel_size=1, + use_causal_conv=False, + ): + """Initialize upsampling network module. + + Args: + upsample_scales (list): List of upsampling scales. + nonlinear_activation (str): Activation function name. + nonlinear_activation_params (dict): Arguments for specified activation function. + interpolate_mode (str): Interpolation mode. + freq_axis_kernel_size (int): Kernel size in the direction of frequency axis. + + """ + super(UpsampleNetwork, self).__init__() + self.use_causal_conv = use_causal_conv + self.up_layers = torch.nn.ModuleList() + for scale in upsample_scales: + # interpolation layer + stretch = Stretch2d(scale, 1, interpolate_mode) + self.up_layers += [stretch] + + # conv layer + assert (freq_axis_kernel_size - 1) % 2 == 0, "Not support even number freq axis kernel size." + freq_axis_padding = (freq_axis_kernel_size - 1) // 2 + kernel_size = (freq_axis_kernel_size, scale * 2 + 1) + if use_causal_conv: + padding = (freq_axis_padding, scale * 2) + else: + padding = (freq_axis_padding, scale) + conv = Conv2d(1, 1, kernel_size=kernel_size, padding=padding, bias=False) + self.up_layers += [conv] + + # nonlinear + if nonlinear_activation is not None: + nonlinear = getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params) + self.up_layers += [nonlinear] + + def forward(self, c): + """Calculate forward propagation. + + Args: + c : Input tensor (B, C, T). 
+ + Returns: + Tensor: Upsampled tensor (B, C, T'), where T' = T * prod(upsample_scales). + + """ + c = c.unsqueeze(1) # (B, 1, C, T) + for f in self.up_layers: + if self.use_causal_conv and isinstance(f, Conv2d): + c = f(c)[..., :c.size(-1)] + else: + c = f(c) + return c.squeeze(1) # (B, C, T') + + +class ConvInUpsampleNetwork(torch.nn.Module): + """Convolution + upsampling network module.""" + + def __init__(self, + upsample_scales, + nonlinear_activation=None, + nonlinear_activation_params={}, + interpolate_mode="nearest", + freq_axis_kernel_size=1, + aux_channels=80, + aux_context_window=0, + use_causal_conv=False + ): + """Initialize convolution + upsampling network module. + + Args: + upsample_scales (list): List of upsampling scales. + nonlinear_activation (str): Activation function name. + nonlinear_activation_params (dict): Arguments for specified activation function. + mode (str): Interpolation mode. + freq_axis_kernel_size (int): Kernel size in the direction of frequency axis. + aux_channels (int): Number of channels of pre-convolutional layer. + aux_context_window (int): Context window size of the pre-convolutional layer. + use_causal_conv (bool): Whether to use causal structure. + + """ + super(ConvInUpsampleNetwork, self).__init__() + self.aux_context_window = aux_context_window + self.use_causal_conv = use_causal_conv and aux_context_window > 0 + # To capture wide-context information in conditional features + kernel_size = aux_context_window + 1 if use_causal_conv else 2 * aux_context_window + 1 + # NOTE(kan-bayashi): Here do not use padding because the input is already padded + self.conv_in = Conv1d(aux_channels, aux_channels, kernel_size=kernel_size, bias=False) + self.upsample = UpsampleNetwork( + upsample_scales=upsample_scales, + nonlinear_activation=nonlinear_activation, + nonlinear_activation_params=nonlinear_activation_params, + interpolate_mode=interpolate_mode, + freq_axis_kernel_size=freq_axis_kernel_size, + use_causal_conv=use_causal_conv, + ) + + def forward(self, c): + """Calculate forward propagation. + + Args: + c : Input tensor (B, C, T'). + + Returns: + Tensor: Upsampled tensor (B, C, T), + where T = (T' - aux_context_window * 2) * prod(upsample_scales). + + Note: + The length of inputs considers the context window size. + + """ + c_ = self.conv_in(c) + c = c_[:, :, :-self.aux_context_window] if self.use_causal_conv else c_ + return self.upsample(c) diff --git a/text_to_speech/modules/vocoder/parallel_wavegan/losses/__init__.py b/text_to_speech/modules/vocoder/parallel_wavegan/losses/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b03080a907cb5cb4b316ceb74866ddbc406b33bf --- /dev/null +++ b/text_to_speech/modules/vocoder/parallel_wavegan/losses/__init__.py @@ -0,0 +1 @@ +from .stft_loss import * # NOQA diff --git a/text_to_speech/modules/vocoder/parallel_wavegan/losses/stft_loss.py b/text_to_speech/modules/vocoder/parallel_wavegan/losses/stft_loss.py new file mode 100644 index 0000000000000000000000000000000000000000..adb5767eb6e48b79c9811139091522cf635b5697 --- /dev/null +++ b/text_to_speech/modules/vocoder/parallel_wavegan/losses/stft_loss.py @@ -0,0 +1,154 @@ +# -*- coding: utf-8 -*- + +# Copyright 2019 Tomoki Hayashi +# MIT License (https://opensource.org/licenses/MIT) + +"""STFT-based Loss modules.""" + +import torch +import torch.nn.functional as F + + +def stft(x, fft_size, hop_size, win_length, window): + """Perform STFT and convert to magnitude spectrogram. + + Args: + x (Tensor): Input signal tensor (B, T). 
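ConvInUpsampleNetwork above first widens the receptive field over the conditioning features with a context-window Conv1d, then stretches them to the waveform rate with UpsampleNetwork. A usage sketch, with upsample scales multiplying to a typical hop size of 256 (an illustrative choice, not a value from this patch):

import torch

upsampler = ConvInUpsampleNetwork(upsample_scales=[4, 4, 4, 4],   # prod = 256 = hop size
                                  aux_channels=80, aux_context_window=2)
mel = torch.randn(1, 80, 50 + 2 * 2)   # [B, C, T'] padded by the context window on each side
out = upsampler(mel)                   # [B, 80, 50 * 256]
print(out.shape)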
+ fft_size (int): FFT size. + hop_size (int): Hop size. + win_length (int): Window length. + window (str): Window function type. + + Returns: + Tensor: Magnitude spectrogram (B, #frames, fft_size // 2 + 1). + + """ + x_stft = torch.stft(x, fft_size, hop_size, win_length, window) + real = x_stft[..., 0] + imag = x_stft[..., 1] + + # NOTE(kan-bayashi): clamp is needed to avoid nan or inf + return torch.sqrt(torch.clamp(real ** 2 + imag ** 2, min=1e-7)).transpose(2, 1) + + +class SpectralConvergengeLoss(torch.nn.Module): + """Spectral convergence loss module.""" + + def __init__(self): + """Initilize spectral convergence loss module.""" + super(SpectralConvergengeLoss, self).__init__() + + def forward(self, x_mag, y_mag): + """Calculate forward propagation. + + Args: + x_mag (Tensor): Magnitude spectrogram of predicted signal (B, #frames, #freq_bins). + y_mag (Tensor): Magnitude spectrogram of groundtruth signal (B, #frames, #freq_bins). + + Returns: + Tensor: Spectral convergence loss value. + + """ + return torch.norm(y_mag - x_mag, p="fro") / torch.norm(y_mag, p="fro") + + +class LogSTFTMagnitudeLoss(torch.nn.Module): + """Log STFT magnitude loss module.""" + + def __init__(self): + """Initilize los STFT magnitude loss module.""" + super(LogSTFTMagnitudeLoss, self).__init__() + + def forward(self, x_mag, y_mag): + """Calculate forward propagation. + + Args: + x_mag (Tensor): Magnitude spectrogram of predicted signal (B, #frames, #freq_bins). + y_mag (Tensor): Magnitude spectrogram of groundtruth signal (B, #frames, #freq_bins). + + Returns: + Tensor: Log STFT magnitude loss value. + + """ + return F.l1_loss(torch.log(y_mag), torch.log(x_mag)) + + +class STFTLoss(torch.nn.Module): + """STFT loss module.""" + + def __init__(self, fft_size=1024, shift_size=120, win_length=600, window="hann_window"): + """Initialize STFT loss module.""" + super(STFTLoss, self).__init__() + self.fft_size = fft_size + self.shift_size = shift_size + self.win_length = win_length + self.window = getattr(torch, window)(win_length) + self.spectral_convergenge_loss = SpectralConvergengeLoss() + self.log_stft_magnitude_loss = LogSTFTMagnitudeLoss() + + def forward(self, x, y): + """Calculate forward propagation. + + Args: + x (Tensor): Predicted signal (B, T). + y (Tensor): Groundtruth signal (B, T). + + Returns: + Tensor: Spectral convergence loss value. + Tensor: Log STFT magnitude loss value. + + """ + self.window = self.window.to(x.device) + x_mag = stft(x, self.fft_size, self.shift_size, self.win_length, self.window) + y_mag = stft(y, self.fft_size, self.shift_size, self.win_length, self.window) + sc_loss = self.spectral_convergenge_loss(x_mag, y_mag) + mag_loss = self.log_stft_magnitude_loss(x_mag, y_mag) + + return sc_loss, mag_loss + + +class MultiResolutionSTFTLoss(torch.nn.Module): + """Multi resolution STFT loss module.""" + + def __init__(self, + fft_sizes=[1024, 2048, 512], + hop_sizes=[120, 240, 50], + win_lengths=[600, 1200, 240], + window="hann_window"): + """Initialize Multi resolution STFT loss module. + + Args: + fft_sizes (list): List of FFT sizes. + hop_sizes (list): List of hop sizes. + win_lengths (list): List of window lengths. + window (str): Window function type. 
+ + """ + super(MultiResolutionSTFTLoss, self).__init__() + assert len(fft_sizes) == len(hop_sizes) == len(win_lengths) + self.stft_losses = torch.nn.ModuleList() + for fs, ss, wl in zip(fft_sizes, hop_sizes, win_lengths): + self.stft_losses += [STFTLoss(fs, ss, wl, window)] + + def forward(self, x, y): + """Calculate forward propagation. + + Args: + x (Tensor): Predicted signal (B, T). + y (Tensor): Groundtruth signal (B, T). + + Returns: + Tensor: Multi resolution spectral convergence loss value. + Tensor: Multi resolution log STFT magnitude loss value. + + """ + sc_loss = 0.0 + mag_loss = 0.0 + for f in self.stft_losses: + sc_l, mag_l = f(x, y) + sc_loss += sc_l + mag_loss += mag_l + sc_loss /= len(self.stft_losses) + mag_loss /= len(self.stft_losses) + + return sc_loss, mag_loss diff --git a/text_to_speech/modules/vocoder/parallel_wavegan/models/__init__.py b/text_to_speech/modules/vocoder/parallel_wavegan/models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4803ba6b2a0afc8022e756ae5b3f4c7403c3c1bd --- /dev/null +++ b/text_to_speech/modules/vocoder/parallel_wavegan/models/__init__.py @@ -0,0 +1,2 @@ +from .melgan import * # NOQA +from .parallel_wavegan import * # NOQA diff --git a/text_to_speech/modules/vocoder/parallel_wavegan/models/freq_discriminator.py b/text_to_speech/modules/vocoder/parallel_wavegan/models/freq_discriminator.py new file mode 100644 index 0000000000000000000000000000000000000000..876b66ff931335ae16a5c36f95beb33e789a3f7d --- /dev/null +++ b/text_to_speech/modules/vocoder/parallel_wavegan/models/freq_discriminator.py @@ -0,0 +1,149 @@ +import torch +import torch.nn as nn + + +class BasicDiscriminatorBlock(nn.Module): + def __init__(self, in_channel, out_channel): + super(BasicDiscriminatorBlock, self).__init__() + self.block = nn.Sequential( + nn.utils.weight_norm(nn.Conv1d( + in_channel, + out_channel, + kernel_size=3, + stride=2, + padding=1, + )), + nn.LeakyReLU(0.2, True), + + nn.utils.weight_norm(nn.Conv1d( + out_channel, + out_channel, + kernel_size=3, + stride=1, + padding=1, + )), + nn.LeakyReLU(0.2, True), + + nn.utils.weight_norm(nn.Conv1d( + out_channel, + out_channel, + kernel_size=3, + stride=1, + padding=1, + )), + nn.LeakyReLU(0.2, True), + + nn.utils.weight_norm(nn.Conv1d( + out_channel, + out_channel, + kernel_size=3, + stride=1, + padding=1, + )), + + ) + + def forward(self, x): + return self.block(x) + + +class ResDiscriminatorBlock(nn.Module): + def __init__(self, in_channel, out_channel): + super(ResDiscriminatorBlock, self).__init__() + self.block1 = nn.Sequential( + nn.utils.weight_norm(nn.Conv1d( + in_channel, + out_channel, + kernel_size=3, + stride=2, + padding=1, + )), + nn.LeakyReLU(0.2, True), + + nn.utils.weight_norm(nn.Conv1d( + out_channel, + out_channel, + kernel_size=3, + stride=1, + padding=1, + )), + ) + + self.shortcut1 = nn.utils.weight_norm(nn.Conv1d( + in_channel, + out_channel, + kernel_size=1, + stride=2, + )) + + self.block2 = nn.Sequential( + nn.utils.weight_norm(nn.Conv1d( + out_channel, + out_channel, + kernel_size=3, + stride=1, + padding=1, + )), + nn.LeakyReLU(0.2, True), + + nn.utils.weight_norm(nn.Conv1d( + out_channel, + out_channel, + kernel_size=3, + stride=1, + padding=1, + )), + ) + + self.shortcut2 = nn.utils.weight_norm(nn.Conv1d( + out_channel, + out_channel, + kernel_size=1, + stride=1, + )) + + def forward(self, x): + x1 = self.block1(x) + x1 = x1 + self.shortcut1(x) + return self.block2(x1) + self.shortcut2(x1) + + +class ResNet18Discriminator(nn.Module): + def 
__init__(self, stft_channel, in_channel=64): + super(ResNet18Discriminator, self).__init__() + self.input = nn.Sequential( + nn.utils.weight_norm(nn.Conv1d(stft_channel, in_channel, kernel_size=7, stride=2, padding=1, )), + nn.LeakyReLU(0.2, True), + ) + self.df1 = BasicDiscriminatorBlock(in_channel, in_channel) + self.df2 = ResDiscriminatorBlock(in_channel, in_channel * 2) + self.df3 = ResDiscriminatorBlock(in_channel * 2, in_channel * 4) + self.df4 = ResDiscriminatorBlock(in_channel * 4, in_channel * 8) + + def forward(self, x): + x = self.input(x) + x = self.df1(x) + x = self.df2(x) + x = self.df3(x) + return self.df4(x) + + +class FrequencyDiscriminator(nn.Module): + def __init__(self, in_channel=64, fft_size=1024, hop_length=256, win_length=1024, window="hann_window"): + super(FrequencyDiscriminator, self).__init__() + self.fft_size = fft_size + self.hop_length = hop_length + self.win_length = win_length + self.window = nn.Parameter(getattr(torch, window)(win_length), requires_grad=False) + self.stft_channel = fft_size // 2 + 1 + self.resnet_disc = ResNet18Discriminator(self.stft_channel, in_channel) + + def forward(self, x): + x_stft = torch.stft(x, self.fft_size, self.hop_length, self.win_length, self.window) + real = x_stft[..., 0] + imag = x_stft[..., 1] + + x_real = self.resnet_disc(real) + x_imag = self.resnet_disc(imag) + + return x_real, x_imag diff --git a/text_to_speech/modules/vocoder/parallel_wavegan/models/melgan.py b/text_to_speech/modules/vocoder/parallel_wavegan/models/melgan.py new file mode 100644 index 0000000000000000000000000000000000000000..f0bc957ff29ba5a54f5913685ac35d6da70e88c6 --- /dev/null +++ b/text_to_speech/modules/vocoder/parallel_wavegan/models/melgan.py @@ -0,0 +1,458 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Tomoki Hayashi +# MIT License (https://opensource.org/licenses/MIT) + +"""MelGAN Modules.""" + +import logging + +import numpy as np +import torch +from torch import nn + +from text_to_speech.modules.vocoder.parallel_wavegan.layers import CausalConv1d +from text_to_speech.modules.vocoder.parallel_wavegan.layers import CausalConvTranspose1d +from text_to_speech.modules.vocoder.parallel_wavegan.layers import ResidualStack +from text_to_speech.modules.vocoder.parallel_wavegan.models.source import SourceModuleCycNoise_v1 + + +class MelGANGenerator(torch.nn.Module): + """MelGAN generator module.""" + + def __init__(self, + in_channels=80, + out_channels=1, + kernel_size=7, + channels=512, + bias=True, + upsample_scales=[8, 8, 2, 2], + stack_kernel_size=3, + stacks=3, + nonlinear_activation="LeakyReLU", + nonlinear_activation_params={"negative_slope": 0.2}, + pad="ReflectionPad1d", + pad_params={}, + use_final_nonlinear_activation=True, + use_weight_norm=True, + use_causal_conv=False, + use_pitch_embed=False, + use_nsf=False, + sample_rate=22050, + **kwargs + ): + """Initialize MelGANGenerator module. + + Args: + in_channels (int): Number of input channels. + out_channels (int): Number of output channels. + kernel_size (int): Kernel size of initial and final conv layer. + channels (int): Initial number of channels for conv layer. + bias (bool): Whether to add bias parameter in convolution layers. + upsample_scales (list): List of upsampling scales. + stack_kernel_size (int): Kernel size of dilated conv layers in residual stack. + stacks (int): Number of stacks in a single residual stack. + nonlinear_activation (str): Activation function module name. + nonlinear_activation_params (dict): Hyperparameters for activation function. 
+ pad (str): Padding function module name before dilated convolution layer. + pad_params (dict): Hyperparameters for padding function. + use_final_nonlinear_activation (torch.nn.Module): Activation function for the final layer. + use_weight_norm (bool): Whether to use weight norm. + If set to true, it will be applied to all of the conv layers. + use_causal_conv (bool): Whether to use causal convolution. + + """ + super(MelGANGenerator, self).__init__() + + # check hyper parameters is valid + assert channels >= np.prod(upsample_scales) + assert channels % (2 ** len(upsample_scales)) == 0 + if not use_causal_conv: + assert (kernel_size - 1) % 2 == 0, "Not support even number kernel size." + + # add initial layer + layers = [] + if not use_causal_conv: + layers += [ + getattr(torch.nn, pad)((kernel_size - 1) // 2, **pad_params), + torch.nn.Conv1d(in_channels, channels, kernel_size, bias=bias), + ] + else: + layers += [ + CausalConv1d(in_channels, channels, kernel_size, + bias=bias, pad=pad, pad_params=pad_params), + ] + + self.use_pitch_embed = use_pitch_embed + if use_pitch_embed: + self.pitch_embed = nn.Embedding(300, in_channels, 0) + self.c_proj = nn.Conv1d(2 * in_channels, in_channels, 1) + + for i, upsample_scale in enumerate(upsample_scales): + # add upsampling layer + layers += [getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params)] + if not use_causal_conv: + layers += [ + torch.nn.ConvTranspose1d( + channels // (2 ** i), + channels // (2 ** (i + 1)), + upsample_scale * 2, + stride=upsample_scale, + padding=upsample_scale // 2 + upsample_scale % 2, + output_padding=upsample_scale % 2, + bias=bias, + ) + ] + else: + layers += [ + CausalConvTranspose1d( + channels // (2 ** i), + channels // (2 ** (i + 1)), + upsample_scale * 2, + stride=upsample_scale, + bias=bias, + ) + ] + + # add residual stack + for j in range(stacks): + layers += [ + ResidualStack( + kernel_size=stack_kernel_size, + channels=channels // (2 ** (i + 1)), + dilation=stack_kernel_size ** j, + bias=bias, + nonlinear_activation=nonlinear_activation, + nonlinear_activation_params=nonlinear_activation_params, + pad=pad, + pad_params=pad_params, + use_causal_conv=use_causal_conv, + ) + ] + self.use_nsf = use_nsf + if use_nsf: + self.harmonic_num = 8 + hop_size = np.prod(upsample_scales) + self.f0_upsamp = torch.nn.Upsample(scale_factor=hop_size) + # self.m_source = SourceModuleHnNSF(sampling_rate=sample_rate, harmonic_num=self.harmonic_num) + self.m_source = SourceModuleCycNoise_v1(sample_rate, 0.003) + self.nsf_conv = nn.Sequential(nn.Conv1d(1, channels // (2 ** (i + 1)), 1), torch.nn.Tanh()) + + # define the model as a single function + self.melgan_body = torch.nn.Sequential(*layers) + layers = [] + # add final layer + layers += [getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params)] + if not use_causal_conv: + layers += [ + getattr(torch.nn, pad)((kernel_size - 1) // 2, **pad_params), + torch.nn.Conv1d(channels // (2 ** (i + 1)), out_channels, kernel_size, bias=bias), + ] + else: + layers += [ + CausalConv1d(channels // (2 ** (i + 1)), out_channels, kernel_size, + bias=bias, pad=pad, pad_params=pad_params), + ] + if use_final_nonlinear_activation: + layers += [torch.nn.Tanh()] + + # define the model as a single function + self.melgan_final = torch.nn.Sequential(*layers) + + # apply weight norm + if use_weight_norm: + self.apply_weight_norm() + + # reset parameters + self.reset_parameters() + + def forward(self, c, f0=None, pitch=None): + """Calculate forward propagation. 
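+        When use_pitch_embed is enabled, pitch (B, T) holds pitch token indices that are
+        embedded and fused with c; when use_nsf is enabled, f0 (B, T) is the frame-level F0
+        contour that is upsampled into a source excitation and added to the hidden features.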
+ + Args: + c (Tensor): Input tensor (B, channels, T). + + Returns: + Tensor: Output tensor (B, 1, T ** prod(upsample_scales)). + + """ + if self.use_pitch_embed: + c = self.c_proj(torch.cat([c, self.pitch_embed(pitch).transpose(1, 2)], 1)) + x = self.melgan_body(c) + if self.use_nsf: + f0_upsample = self.f0_upsamp(f0[:, None, :]) + f0_upsample = self.nsf_conv(f0_upsample) + x = x + f0_upsample + x = self.melgan_final(x) + return x + + def remove_weight_norm(self): + """Remove weight normalization module from all of the layers.""" + def _remove_weight_norm(m): + try: + logging.debug(f"Weight norm is removed from {m}.") + torch.nn.utils.remove_weight_norm(m) + except ValueError: # this module didn't have weight norm + return + + self.apply(_remove_weight_norm) + + def apply_weight_norm(self): + """Apply weight normalization module from all of the layers.""" + def _apply_weight_norm(m): + if isinstance(m, torch.nn.Conv1d) or isinstance(m, torch.nn.ConvTranspose1d): + torch.nn.utils.weight_norm(m) + logging.debug(f"Weight norm is applied to {m}.") + + self.apply(_apply_weight_norm) + + def reset_parameters(self): + """Reset parameters. + + This initialization follows official implementation manner. + https://github.com/descriptinc/melgan-neurips/blob/master/spec2wav/modules.py + + """ + def _reset_parameters(m): + if isinstance(m, torch.nn.Conv1d) or isinstance(m, torch.nn.ConvTranspose1d): + m.weight.data.normal_(0.0, 0.02) + logging.debug(f"Reset parameters in {m}.") + + self.apply(_reset_parameters) + + +class MelGANDiscriminator(torch.nn.Module): + """MelGAN discriminator module.""" + + def __init__(self, + in_channels=1, + out_channels=1, + kernel_sizes=[5, 3], + channels=16, + max_downsample_channels=1024, + bias=True, + downsample_scales=[4, 4, 4, 4], + nonlinear_activation="LeakyReLU", + nonlinear_activation_params={"negative_slope": 0.2}, + pad="ReflectionPad1d", + pad_params={}, + ): + """Initilize MelGAN discriminator module. + + Args: + in_channels (int): Number of input channels. + out_channels (int): Number of output channels. + kernel_sizes (list): List of two kernel sizes. The prod will be used for the first conv layer, + and the first and the second kernel sizes will be used for the last two layers. + For example if kernel_sizes = [5, 3], the first layer kernel size will be 5 * 3 = 15, + the last two layers' kernel size will be 5 and 3, respectively. + channels (int): Initial number of channels for conv layer. + max_downsample_channels (int): Maximum number of channels for downsampling layers. + bias (bool): Whether to add bias parameter in convolution layers. + downsample_scales (list): List of downsampling scales. + nonlinear_activation (str): Activation function module name. + nonlinear_activation_params (dict): Hyperparameters for activation function. + pad (str): Padding function module name before dilated convolution layer. + pad_params (dict): Hyperparameters for padding function. 
+ + """ + super(MelGANDiscriminator, self).__init__() + self.layers = torch.nn.ModuleList() + + # check kernel size is valid + assert len(kernel_sizes) == 2 + assert kernel_sizes[0] % 2 == 1 + assert kernel_sizes[1] % 2 == 1 + + # add first layer + self.layers += [ + torch.nn.Sequential( + getattr(torch.nn, pad)((np.prod(kernel_sizes) - 1) // 2, **pad_params), + torch.nn.Conv1d(in_channels, channels, np.prod(kernel_sizes), bias=bias), + getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params), + ) + ] + + # add downsample layers + in_chs = channels + for downsample_scale in downsample_scales: + out_chs = min(in_chs * downsample_scale, max_downsample_channels) + self.layers += [ + torch.nn.Sequential( + torch.nn.Conv1d( + in_chs, out_chs, + kernel_size=downsample_scale * 10 + 1, + stride=downsample_scale, + padding=downsample_scale * 5, + groups=in_chs // 4, + bias=bias, + ), + getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params), + ) + ] + in_chs = out_chs + + # add final layers + out_chs = min(in_chs * 2, max_downsample_channels) + self.layers += [ + torch.nn.Sequential( + torch.nn.Conv1d( + in_chs, out_chs, kernel_sizes[0], + padding=(kernel_sizes[0] - 1) // 2, + bias=bias, + ), + getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params), + ) + ] + self.layers += [ + torch.nn.Conv1d( + out_chs, out_channels, kernel_sizes[1], + padding=(kernel_sizes[1] - 1) // 2, + bias=bias, + ), + ] + + def forward(self, x): + """Calculate forward propagation. + + Args: + x (Tensor): Input noise signal (B, 1, T). + + Returns: + List: List of output tensors of each layer. + + """ + outs = [] + for f in self.layers: + x = f(x) + outs += [x] + + return outs + + +class MelGANMultiScaleDiscriminator(torch.nn.Module): + """MelGAN multi-scale discriminator module.""" + + def __init__(self, + in_channels=1, + out_channels=1, + scales=3, + downsample_pooling="AvgPool1d", + # follow the official implementation setting + downsample_pooling_params={ + "kernel_size": 4, + "stride": 2, + "padding": 1, + "count_include_pad": False, + }, + kernel_sizes=[5, 3], + channels=16, + max_downsample_channels=1024, + bias=True, + downsample_scales=[4, 4, 4, 4], + nonlinear_activation="LeakyReLU", + nonlinear_activation_params={"negative_slope": 0.2}, + pad="ReflectionPad1d", + pad_params={}, + use_weight_norm=True, + **kwargs + ): + """Initilize MelGAN multi-scale discriminator module. + + Args: + in_channels (int): Number of input channels. + out_channels (int): Number of output channels. + downsample_pooling (str): Pooling module name for downsampling of the inputs. + downsample_pooling_params (dict): Parameters for the above pooling module. + kernel_sizes (list): List of two kernel sizes. The sum will be used for the first conv layer, + and the first and the second kernel sizes will be used for the last two layers. + channels (int): Initial number of channels for conv layer. + max_downsample_channels (int): Maximum number of channels for downsampling layers. + bias (bool): Whether to add bias parameter in convolution layers. + downsample_scales (list): List of downsampling scales. + nonlinear_activation (str): Activation function module name. + nonlinear_activation_params (dict): Hyperparameters for activation function. + pad (str): Padding function module name before dilated convolution layer. + pad_params (dict): Hyperparameters for padding function. + use_causal_conv (bool): Whether to use causal convolution. 
+ + """ + super(MelGANMultiScaleDiscriminator, self).__init__() + self.discriminators = torch.nn.ModuleList() + + # add discriminators + for _ in range(scales): + self.discriminators += [ + MelGANDiscriminator( + in_channels=in_channels, + out_channels=out_channels, + kernel_sizes=kernel_sizes, + channels=channels, + max_downsample_channels=max_downsample_channels, + bias=bias, + downsample_scales=downsample_scales, + nonlinear_activation=nonlinear_activation, + nonlinear_activation_params=nonlinear_activation_params, + pad=pad, + pad_params=pad_params, + ) + ] + self.pooling = getattr(torch.nn, downsample_pooling)(**downsample_pooling_params) + + # apply weight norm + if use_weight_norm: + self.apply_weight_norm() + + # reset parameters + self.reset_parameters() + + def forward(self, x): + """Calculate forward propagation. + + Args: + x (Tensor): Input noise signal (B, 1, T). + + Returns: + List: List of list of each discriminator outputs, which consists of each layer output tensors. + + """ + outs = [] + for f in self.discriminators: + outs += [f(x)] + x = self.pooling(x) + + return outs + + def remove_weight_norm(self): + """Remove weight normalization module from all of the layers.""" + def _remove_weight_norm(m): + try: + logging.debug(f"Weight norm is removed from {m}.") + torch.nn.utils.remove_weight_norm(m) + except ValueError: # this module didn't have weight norm + return + + self.apply(_remove_weight_norm) + + def apply_weight_norm(self): + """Apply weight normalization module from all of the layers.""" + def _apply_weight_norm(m): + if isinstance(m, torch.nn.Conv1d) or isinstance(m, torch.nn.ConvTranspose1d): + torch.nn.utils.weight_norm(m) + logging.debug(f"Weight norm is applied to {m}.") + + self.apply(_apply_weight_norm) + + def reset_parameters(self): + """Reset parameters. + + This initialization follows official implementation manner. 
+ https://github.com/descriptinc/melgan-neurips/blob/master/spec2wav/modules.py + + """ + def _reset_parameters(m): + if isinstance(m, torch.nn.Conv1d) or isinstance(m, torch.nn.ConvTranspose1d): + m.weight.data.normal_(0.0, 0.02) + logging.debug(f"Reset parameters in {m}.") + + self.apply(_reset_parameters) diff --git a/text_to_speech/modules/vocoder/parallel_wavegan/models/parallel_wavegan.py b/text_to_speech/modules/vocoder/parallel_wavegan/models/parallel_wavegan.py new file mode 100644 index 0000000000000000000000000000000000000000..2543d8eefc031675078c5ee98665066100986c49 --- /dev/null +++ b/text_to_speech/modules/vocoder/parallel_wavegan/models/parallel_wavegan.py @@ -0,0 +1,461 @@ +# -*- coding: utf-8 -*- + +# Copyright 2019 Tomoki Hayashi +# MIT License (https://opensource.org/licenses/MIT) + +"""Parallel WaveGAN Modules.""" + +import logging +import math + +import torch +from torch import nn + +from text_to_speech.modules.vocoder.parallel_wavegan.layers import Conv1d +from text_to_speech.modules.vocoder.parallel_wavegan.layers import Conv1d1x1 +from text_to_speech.modules.vocoder.parallel_wavegan.layers import ResidualBlock +from text_to_speech.modules.vocoder.parallel_wavegan.layers import upsample +from text_to_speech.modules.vocoder.parallel_wavegan import models +from text_to_speech.modules.vocoder.parallel_wavegan.models import SourceModuleCycNoise_v1 +from text_to_speech.utils.commons.hparams import hparams +import numpy as np + +class ParallelWaveGANGenerator(torch.nn.Module): + """Parallel WaveGAN Generator module.""" + + def __init__(self, + in_channels=1, + out_channels=1, + kernel_size=3, + layers=30, + stacks=3, + residual_channels=64, + gate_channels=128, + skip_channels=64, + aux_channels=80, + aux_context_window=2, + dropout=0.0, + bias=True, + use_weight_norm=True, + use_causal_conv=False, + upsample_conditional_features=True, + upsample_net="ConvInUpsampleNetwork", + upsample_params={"upsample_scales": [4, 4, 4, 4]}, + use_pitch_embed=False, + use_nsf=False, + sample_rate=22050, + ): + """Initialize Parallel WaveGAN Generator module. + + Args: + in_channels (int): Number of input channels. + out_channels (int): Number of output channels. + kernel_size (int): Kernel size of dilated convolution. + layers (int): Number of residual block layers. + stacks (int): Number of stacks i.e., dilation cycles. + residual_channels (int): Number of channels in residual conv. + gate_channels (int): Number of channels in gated conv. + skip_channels (int): Number of channels in skip conv. + aux_channels (int): Number of channels for auxiliary feature conv. + aux_context_window (int): Context window size for auxiliary feature. + dropout (float): Dropout rate. 0.0 means no dropout applied. + bias (bool): Whether to use bias parameter in conv layer. + use_weight_norm (bool): Whether to use weight norm. + If set to true, it will be applied to all of the conv layers. + use_causal_conv (bool): Whether to use causal structure. + upsample_conditional_features (bool): Whether to use upsampling network. + upsample_net (str): Upsampling network architecture. + upsample_params (dict): Upsampling network parameters. 
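+            use_pitch_embed (bool): Whether to embed discrete pitch indices and fuse them with the auxiliary features.
+            use_nsf (bool): Whether to add a cyclic-noise source signal derived from f0 to the upsampled conditioning.
+            sample_rate (int): Sampling rate used by the NSF source module.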
+ + """ + super(ParallelWaveGANGenerator, self).__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.aux_channels = aux_channels + self.layers = layers + self.stacks = stacks + self.kernel_size = kernel_size + + # check the number of layers and stacks + assert layers % stacks == 0 + layers_per_stack = layers // stacks + + # define first convolution + self.first_conv = Conv1d1x1(in_channels, residual_channels, bias=True) + + # define conv + upsampling network + self.aux_context_window = aux_context_window + if upsample_conditional_features: + upsample_params.update({ + "use_causal_conv": use_causal_conv, + }) + if upsample_net == "MelGANGenerator": + assert aux_context_window == 0 + upsample_params.update({ + "use_weight_norm": False, # not to apply twice + "use_final_nonlinear_activation": False, + }) + self.upsample_net = getattr(models, upsample_net)(**upsample_params) + else: + if upsample_net == "ConvInUpsampleNetwork": + upsample_params.update({ + "aux_channels": aux_channels, + "aux_context_window": aux_context_window, + }) + self.upsample_net = getattr(upsample, upsample_net)(**upsample_params) + else: + self.upsample_net = None + + # define residual blocks + self.conv_layers = torch.nn.ModuleList() + for layer in range(layers): + dilation = 2 ** (layer % layers_per_stack) + conv = ResidualBlock( + kernel_size=kernel_size, + residual_channels=residual_channels, + gate_channels=gate_channels, + skip_channels=skip_channels, + aux_channels=aux_channels, + dilation=dilation, + dropout=dropout, + bias=bias, + use_causal_conv=use_causal_conv, + ) + self.conv_layers += [conv] + + # define output layers + self.last_conv_layers = torch.nn.ModuleList([ + torch.nn.ReLU(inplace=True), + Conv1d1x1(skip_channels, skip_channels, bias=True), + torch.nn.ReLU(inplace=True), + Conv1d1x1(skip_channels, out_channels, bias=True), + ]) + + self.use_pitch_embed = use_pitch_embed + if use_pitch_embed: + self.pitch_embed = nn.Embedding(300, aux_channels, 0) + self.c_proj = nn.Linear(2 * aux_channels, aux_channels) + self.use_nsf = use_nsf + if use_nsf: + self.harmonic_num = 8 + hop_size = np.prod(upsample_params['upsample_scales']) + self.f0_upsamp = torch.nn.Upsample(scale_factor=hop_size) + self.m_source = SourceModuleCycNoise_v1(sample_rate, 0.003) + self.nsf_conv = nn.Sequential(nn.Conv1d(1, aux_channels, 1), torch.nn.Tanh()) + + # apply weight norm + if use_weight_norm: + self.apply_weight_norm() + + def forward(self, x, c=None, pitch=None, f0=None, **kwargs): + """Calculate forward propagation. + + Args: + x (Tensor): Input noise signal (B, C_in, T). + c (Tensor): Local conditioning auxiliary features (B, C ,T'). + pitch (Tensor): Local conditioning pitch (B, T'). 
+ + Returns: + Tensor: Output tensor (B, C_out, T) + + """ + # perform upsampling + if c is not None and self.upsample_net is not None: + if self.use_pitch_embed: + p = self.pitch_embed(pitch) + c = self.c_proj(torch.cat([c.transpose(1, 2), p], -1)).transpose(1, 2) + c = self.upsample_net(c) + if self.use_nsf: + f0_upsample = self.f0_upsamp( + f0[:, None, :][:, :, self.aux_context_window:-self.aux_context_window]) + f0_upsample = self.nsf_conv(f0_upsample) + c = c + f0_upsample + if x is None: + x = torch.randn([c.size(0), 1, c.size(-1)]).to(c.device) + assert c.size(-1) == x.size(-1), (c.size(-1), x.size(-1)) + + # encode to hidden representation + x = self.first_conv(x) + skips = 0 + for f in self.conv_layers: + x, h = f(x, c) + skips += h + skips *= math.sqrt(1.0 / len(self.conv_layers)) + + # apply final layers + x = skips + for f in self.last_conv_layers: + x = f(x) + + return x + + def remove_weight_norm(self): + """Remove weight normalization module from all of the layers.""" + def _remove_weight_norm(m): + try: + logging.debug(f"Weight norm is removed from {m}.") + torch.nn.utils.remove_weight_norm(m) + except ValueError: # this module didn't have weight norm + return + + self.apply(_remove_weight_norm) + + def apply_weight_norm(self): + """Apply weight normalization module from all of the layers.""" + def _apply_weight_norm(m): + if isinstance(m, torch.nn.Conv1d) or isinstance(m, torch.nn.Conv2d): + torch.nn.utils.weight_norm(m) + logging.debug(f"Weight norm is applied to {m}.") + + self.apply(_apply_weight_norm) + + @staticmethod + def _get_receptive_field_size(layers, stacks, kernel_size, + dilation=lambda x: 2 ** x): + assert layers % stacks == 0 + layers_per_cycle = layers // stacks + dilations = [dilation(i % layers_per_cycle) for i in range(layers)] + return (kernel_size - 1) * sum(dilations) + 1 + + @property + def receptive_field_size(self): + """Return receptive field size.""" + return self._get_receptive_field_size(self.layers, self.stacks, self.kernel_size) + + +class ParallelWaveGANDiscriminator(torch.nn.Module): + """Parallel WaveGAN Discriminator module.""" + + def __init__(self, + in_channels=1, + out_channels=1, + kernel_size=3, + layers=10, + conv_channels=64, + dilation_factor=1, + nonlinear_activation="LeakyReLU", + nonlinear_activation_params={"negative_slope": 0.2}, + bias=True, + use_weight_norm=True, + ): + """Initialize Parallel WaveGAN Discriminator module. + + Args: + in_channels (int): Number of input channels. + out_channels (int): Number of output channels. + kernel_size (int): Number of output channels. + layers (int): Number of conv layers. + conv_channels (int): Number of chnn layers. + dilation_factor (int): Dilation factor. For example, if dilation_factor = 2, + the dilation will be 2, 4, 8, ..., and so on. + nonlinear_activation (str): Nonlinear function after each conv. + nonlinear_activation_params (dict): Nonlinear function parameters + bias (bool): Whether to use bias parameter in conv. + use_weight_norm (bool) Whether to use weight norm. + If set to true, it will be applied to all of the conv layers. + + """ + super(ParallelWaveGANDiscriminator, self).__init__() + assert (kernel_size - 1) % 2 == 0, "Not support even number kernel size." + assert dilation_factor > 0, "Dilation factor must be > 0." 
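+        # Stack (layers - 1) dilated non-causal convolutions followed by a final
+        # dilation-1 output conv. With dilation_factor == 1 the dilation grows
+        # linearly with depth (1, 1, 2, 3, ...); otherwise it grows as dilation_factor ** i.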
+ self.conv_layers = torch.nn.ModuleList() + conv_in_channels = in_channels + for i in range(layers - 1): + if i == 0: + dilation = 1 + else: + dilation = i if dilation_factor == 1 else dilation_factor ** i + conv_in_channels = conv_channels + padding = (kernel_size - 1) // 2 * dilation + conv_layer = [ + Conv1d(conv_in_channels, conv_channels, + kernel_size=kernel_size, padding=padding, + dilation=dilation, bias=bias), + getattr(torch.nn, nonlinear_activation)(inplace=True, **nonlinear_activation_params) + ] + self.conv_layers += conv_layer + padding = (kernel_size - 1) // 2 + last_conv_layer = Conv1d( + conv_in_channels, out_channels, + kernel_size=kernel_size, padding=padding, bias=bias) + self.conv_layers += [last_conv_layer] + + # apply weight norm + if use_weight_norm: + self.apply_weight_norm() + + def forward(self, x, cond=None): + """Calculate forward propagation. + + Args: + x (Tensor): Input noise signal (B, 1, T). + cond (Tensor): Input noise signal (B, H, T_frame). + + Returns: + Tensor: Output tensor (B, 1, T) + + """ + cond_layer_i = len(self.conv_layers) // 2 + for i, f in enumerate(self.conv_layers): + if i == cond_layer_i and cond is not None: + aux_context_window = hparams['aux_context_window'] + cond = cond[:, :, aux_context_window:-aux_context_window] + cond = cond[:, :, :, None].repeat([1, 1, 1, hparams['hop_size']]).reshape( + cond.shape[0], cond.shape[1], -1) + x = x + cond + x = f(x) + return x + + def apply_weight_norm(self): + """Apply weight normalization module from all of the layers.""" + def _apply_weight_norm(m): + if isinstance(m, torch.nn.Conv1d) or isinstance(m, torch.nn.Conv2d): + torch.nn.utils.weight_norm(m) + logging.debug(f"Weight norm is applied to {m}.") + + self.apply(_apply_weight_norm) + + def remove_weight_norm(self): + """Remove weight normalization module from all of the layers.""" + def _remove_weight_norm(m): + try: + logging.debug(f"Weight norm is removed from {m}.") + torch.nn.utils.remove_weight_norm(m) + except ValueError: # this module didn't have weight norm + return + + self.apply(_remove_weight_norm) + + +class ResidualParallelWaveGANDiscriminator(torch.nn.Module): + """Parallel WaveGAN Discriminator module.""" + + def __init__(self, + in_channels=1, + out_channels=1, + kernel_size=3, + layers=30, + stacks=3, + residual_channels=64, + gate_channels=128, + skip_channels=64, + dropout=0.0, + bias=True, + use_weight_norm=True, + use_causal_conv=False, + nonlinear_activation="LeakyReLU", + nonlinear_activation_params={"negative_slope": 0.2}, + ): + """Initialize Parallel WaveGAN Discriminator module. + + Args: + in_channels (int): Number of input channels. + out_channels (int): Number of output channels. + kernel_size (int): Kernel size of dilated convolution. + layers (int): Number of residual block layers. + stacks (int): Number of stacks i.e., dilation cycles. + residual_channels (int): Number of channels in residual conv. + gate_channels (int): Number of channels in gated conv. + skip_channels (int): Number of channels in skip conv. + dropout (float): Dropout rate. 0.0 means no dropout applied. + bias (bool): Whether to use bias parameter in conv. + use_weight_norm (bool): Whether to use weight norm. + If set to true, it will be applied to all of the conv layers. + use_causal_conv (bool): Whether to use causal structure. + nonlinear_activation_params (dict): Nonlinear function parameters + + """ + super(ResidualParallelWaveGANDiscriminator, self).__init__() + assert (kernel_size - 1) % 2 == 0, "Not support even number kernel size." 
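+        # WaveNet-like discriminator: reuses the generator's residual blocks without
+        # auxiliary conditioning (aux_channels=-1) and sums their skip connections.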
+ + self.in_channels = in_channels + self.out_channels = out_channels + self.layers = layers + self.stacks = stacks + self.kernel_size = kernel_size + + # check the number of layers and stacks + assert layers % stacks == 0 + layers_per_stack = layers // stacks + + # define first convolution + self.first_conv = torch.nn.Sequential( + Conv1d1x1(in_channels, residual_channels, bias=True), + getattr(torch.nn, nonlinear_activation)( + inplace=True, **nonlinear_activation_params), + ) + + # define residual blocks + self.conv_layers = torch.nn.ModuleList() + for layer in range(layers): + dilation = 2 ** (layer % layers_per_stack) + conv = ResidualBlock( + kernel_size=kernel_size, + residual_channels=residual_channels, + gate_channels=gate_channels, + skip_channels=skip_channels, + aux_channels=-1, + dilation=dilation, + dropout=dropout, + bias=bias, + use_causal_conv=use_causal_conv, + ) + self.conv_layers += [conv] + + # define output layers + self.last_conv_layers = torch.nn.ModuleList([ + getattr(torch.nn, nonlinear_activation)( + inplace=True, **nonlinear_activation_params), + Conv1d1x1(skip_channels, skip_channels, bias=True), + getattr(torch.nn, nonlinear_activation)( + inplace=True, **nonlinear_activation_params), + Conv1d1x1(skip_channels, out_channels, bias=True), + ]) + + # apply weight norm + if use_weight_norm: + self.apply_weight_norm() + + def forward(self, x): + """Calculate forward propagation. + + Args: + x (Tensor): Input noise signal (B, 1, T). + + Returns: + Tensor: Output tensor (B, 1, T) + + """ + x = self.first_conv(x) + + skips = 0 + for f in self.conv_layers: + x, h = f(x, None) + skips += h + skips *= math.sqrt(1.0 / len(self.conv_layers)) + + # apply final layers + x = skips + for f in self.last_conv_layers: + x = f(x) + return x + + def apply_weight_norm(self): + """Apply weight normalization module from all of the layers.""" + def _apply_weight_norm(m): + if isinstance(m, torch.nn.Conv1d) or isinstance(m, torch.nn.Conv2d): + torch.nn.utils.weight_norm(m) + logging.debug(f"Weight norm is applied to {m}.") + + self.apply(_apply_weight_norm) + + def remove_weight_norm(self): + """Remove weight normalization module from all of the layers.""" + def _remove_weight_norm(m): + try: + logging.debug(f"Weight norm is removed from {m}.") + torch.nn.utils.remove_weight_norm(m) + except ValueError: # this module didn't have weight norm + return + + self.apply(_remove_weight_norm) diff --git a/text_to_speech/modules/vocoder/parallel_wavegan/models/source.py b/text_to_speech/modules/vocoder/parallel_wavegan/models/source.py new file mode 100644 index 0000000000000000000000000000000000000000..f2a006e53c0e2194036fd08ea9d6ed4d9a10d6cf --- /dev/null +++ b/text_to_speech/modules/vocoder/parallel_wavegan/models/source.py @@ -0,0 +1,538 @@ +import torch +import numpy as np +import sys +import torch.nn.functional as torch_nn_func + + +class SineGen(torch.nn.Module): + """ Definition of sine generator + SineGen(samp_rate, harmonic_num = 0, + sine_amp = 0.1, noise_std = 0.003, + voiced_threshold = 0, + flag_for_pulse=False) + + samp_rate: sampling rate in Hz + harmonic_num: number of harmonic overtones (default 0) + sine_amp: amplitude of sine-wavefrom (default 0.1) + noise_std: std of Gaussian noise (default 0.003) + voiced_thoreshold: F0 threshold for U/V classification (default 0) + flag_for_pulse: this SinGen is used inside PulseGen (default False) + + Note: when flag_for_pulse is True, the first time step of a voiced + segment is always sin(np.pi) or cos(0) + """ + + def __init__(self, 
samp_rate, harmonic_num=0, + sine_amp=0.1, noise_std=0.003, + voiced_threshold=0, + flag_for_pulse=False): + super(SineGen, self).__init__() + self.sine_amp = sine_amp + self.noise_std = noise_std + self.harmonic_num = harmonic_num + self.dim = self.harmonic_num + 1 + self.sampling_rate = samp_rate + self.voiced_threshold = voiced_threshold + self.flag_for_pulse = flag_for_pulse + + def _f02uv(self, f0): + # generate uv signal + uv = torch.ones_like(f0) + uv = uv * (f0 > self.voiced_threshold) + return uv + + def _f02sine(self, f0_values): + """ f0_values: (batchsize, length, dim) + where dim indicates fundamental tone and overtones + """ + # convert to F0 in rad. The interger part n can be ignored + # because 2 * np.pi * n doesn't affect phase + rad_values = (f0_values / self.sampling_rate) % 1 + + # initial phase noise (no noise for fundamental component) + rand_ini = torch.rand(f0_values.shape[0], f0_values.shape[2], \ + device=f0_values.device) + rand_ini[:, 0] = 0 + rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini + + # instantanouse phase sine[t] = sin(2*pi \sum_i=1 ^{t} rad) + if not self.flag_for_pulse: + # for normal case + + # To prevent torch.cumsum numerical overflow, + # it is necessary to add -1 whenever \sum_k=1^n rad_value_k > 1. + # Buffer tmp_over_one_idx indicates the time step to add -1. + # This will not change F0 of sine because (x-1) * 2*pi = x * 2*pi + tmp_over_one = torch.cumsum(rad_values, 1) % 1 + tmp_over_one_idx = (tmp_over_one[:, 1:, :] - + tmp_over_one[:, :-1, :]) < 0 + cumsum_shift = torch.zeros_like(rad_values) + cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0 + + sines = torch.sin(torch.cumsum(rad_values + cumsum_shift, dim=1) + * 2 * np.pi) + else: + # If necessary, make sure that the first time step of every + # voiced segments is sin(pi) or cos(0) + # This is used for pulse-train generation + + # identify the last time step in unvoiced segments + uv = self._f02uv(f0_values) + uv_1 = torch.roll(uv, shifts=-1, dims=1) + uv_1[:, -1, :] = 1 + u_loc = (uv < 1) * (uv_1 > 0) + + # get the instantanouse phase + tmp_cumsum = torch.cumsum(rad_values, dim=1) + # different batch needs to be processed differently + for idx in range(f0_values.shape[0]): + temp_sum = tmp_cumsum[idx, u_loc[idx, :, 0], :] + temp_sum[1:, :] = temp_sum[1:, :] - temp_sum[0:-1, :] + # stores the accumulation of i.phase within + # each voiced segments + tmp_cumsum[idx, :, :] = 0 + tmp_cumsum[idx, u_loc[idx, :, 0], :] = temp_sum + + # rad_values - tmp_cumsum: remove the accumulation of i.phase + # within the previous voiced segment. 
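+            # cos is used below so that each voiced segment restarts near cos(0) = 1,
+            # which PulseGen relies on when locating pulse positions.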
+ i_phase = torch.cumsum(rad_values - tmp_cumsum, dim=1) + + # get the sines + sines = torch.cos(i_phase * 2 * np.pi) + return sines + + def forward(self, f0): + """ sine_tensor, uv = forward(f0) + input F0: tensor(batchsize=1, length, dim=1) + f0 for unvoiced steps should be 0 + output sine_tensor: tensor(batchsize=1, length, dim) + output uv: tensor(batchsize=1, length, 1) + """ + with torch.no_grad(): + f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, + device=f0.device) + # fundamental component + f0_buf[:, :, 0] = f0[:, :, 0] + for idx in np.arange(self.harmonic_num): + # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic + f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * (idx + 2) + + # generate sine waveforms + sine_waves = self._f02sine(f0_buf) * self.sine_amp + + # generate uv signal + # uv = torch.ones(f0.shape) + # uv = uv * (f0 > self.voiced_threshold) + uv = self._f02uv(f0) + + # noise: for unvoiced should be similar to sine_amp + # std = self.sine_amp/3 -> max value ~ self.sine_amp + # . for voiced regions is self.noise_std + noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3 + noise = noise_amp * torch.randn_like(sine_waves) + + # first: set the unvoiced part to 0 by uv + # then: additive noise + sine_waves = sine_waves * uv + noise + return sine_waves, uv, noise + + +class PulseGen(torch.nn.Module): + """ Definition of Pulse train generator + + There are many ways to implement pulse generator. + Here, PulseGen is based on SinGen. For a perfect + """ + def __init__(self, samp_rate, pulse_amp = 0.1, + noise_std = 0.003, voiced_threshold = 0): + super(PulseGen, self).__init__() + self.pulse_amp = pulse_amp + self.sampling_rate = samp_rate + self.voiced_threshold = voiced_threshold + self.noise_std = noise_std + self.l_sinegen = SineGen(self.sampling_rate, harmonic_num=0, \ + sine_amp=self.pulse_amp, noise_std=0, \ + voiced_threshold=self.voiced_threshold, \ + flag_for_pulse=True) + + def forward(self, f0): + """ Pulse train generator + pulse_train, uv = forward(f0) + input F0: tensor(batchsize=1, length, dim=1) + f0 for unvoiced steps should be 0 + output pulse_train: tensor(batchsize=1, length, dim) + output uv: tensor(batchsize=1, length, 1) + + Note: self.l_sine doesn't make sure that the initial phase of + a voiced segment is np.pi, the first pulse in a voiced segment + may not be at the first time step within a voiced segment + """ + with torch.no_grad(): + sine_wav, uv, noise = self.l_sinegen(f0) + + # sine without additive noise + pure_sine = sine_wav - noise + + # step t corresponds to a pulse if + # sine[t] > sine[t+1] & sine[t] > sine[t-1] + # & sine[t-1], sine[t+1], and sine[t] are voiced + # or + # sine[t] is voiced, sine[t-1] is unvoiced + # we use torch.roll to simulate sine[t+1] and sine[t-1] + sine_1 = torch.roll(pure_sine, shifts=1, dims=1) + uv_1 = torch.roll(uv, shifts=1, dims=1) + uv_1[:, 0, :] = 0 + sine_2 = torch.roll(pure_sine, shifts=-1, dims=1) + uv_2 = torch.roll(uv, shifts=-1, dims=1) + uv_2[:, -1, :] = 0 + + loc = (pure_sine > sine_1) * (pure_sine > sine_2) \ + * (uv_1 > 0) * (uv_2 > 0) * (uv > 0) \ + + (uv_1 < 1) * (uv > 0) + + # pulse train without noise + pulse_train = pure_sine * loc + + # additive noise to pulse train + # note that noise from sinegen is zero in voiced regions + pulse_noise = torch.randn_like(pure_sine) * self.noise_std + + # with additive noise on pulse, and unvoiced regions + pulse_train += pulse_noise * loc + pulse_noise * (1 - uv) + return pulse_train, sine_wav, uv, pulse_noise + + +class 
SignalsConv1d(torch.nn.Module): + """ Filtering input signal with time invariant filter + Note: FIRFilter conducted filtering given fixed FIR weight + SignalsConv1d convolves two signals + Note: this is based on torch.nn.functional.conv1d + + """ + + def __init__(self): + super(SignalsConv1d, self).__init__() + + def forward(self, signal, system_ir): + """ output = forward(signal, system_ir) + + signal: (batchsize, length1, dim) + system_ir: (length2, dim) + + output: (batchsize, length1, dim) + """ + if signal.shape[-1] != system_ir.shape[-1]: + print("Error: SignalsConv1d expects shape:") + print("signal (batchsize, length1, dim)") + print("system_id (batchsize, length2, dim)") + print("But received signal: {:s}".format(str(signal.shape))) + print(" system_ir: {:s}".format(str(system_ir.shape))) + sys.exit(1) + padding_length = system_ir.shape[0] - 1 + groups = signal.shape[-1] + + # pad signal on the left + signal_pad = torch_nn_func.pad(signal.permute(0, 2, 1), \ + (padding_length, 0)) + # prepare system impulse response as (dim, 1, length2) + # also flip the impulse response + ir = torch.flip(system_ir.unsqueeze(1).permute(2, 1, 0), \ + dims=[2]) + # convolute + output = torch_nn_func.conv1d(signal_pad, ir, groups=groups) + return output.permute(0, 2, 1) + + +class CyclicNoiseGen_v1(torch.nn.Module): + """ CyclicnoiseGen_v1 + Cyclic noise with a single parameter of beta. + Pytorch v1 implementation assumes f_t is also fixed + """ + + def __init__(self, samp_rate, + noise_std=0.003, voiced_threshold=0): + super(CyclicNoiseGen_v1, self).__init__() + self.samp_rate = samp_rate + self.noise_std = noise_std + self.voiced_threshold = voiced_threshold + + self.l_pulse = PulseGen(samp_rate, pulse_amp=1.0, + noise_std=noise_std, + voiced_threshold=voiced_threshold) + self.l_conv = SignalsConv1d() + + def noise_decay(self, beta, f0mean): + """ decayed_noise = noise_decay(beta, f0mean) + decayed_noise = n[t]exp(-t * f_mean / beta / samp_rate) + + beta: (dim=1) or (batchsize=1, 1, dim=1) + f0mean (batchsize=1, 1, dim=1) + + decayed_noise (batchsize=1, length, dim=1) + """ + with torch.no_grad(): + # exp(-1.0 n / T) < 0.01 => n > -log(0.01)*T = 4.60*T + # truncate the noise when decayed by -40 dB + length = 4.6 * self.samp_rate / f0mean + length = length.int() + time_idx = torch.arange(0, length, device=beta.device) + time_idx = time_idx.unsqueeze(0).unsqueeze(2) + time_idx = time_idx.repeat(beta.shape[0], 1, beta.shape[2]) + + noise = torch.randn(time_idx.shape, device=beta.device) + + # due to Pytorch implementation, use f0_mean as the f0 factor + decay = torch.exp(-time_idx * f0mean / beta / self.samp_rate) + return noise * self.noise_std * decay + + def forward(self, f0s, beta): + """ Producde cyclic-noise + """ + # pulse train + pulse_train, sine_wav, uv, noise = self.l_pulse(f0s) + pure_pulse = pulse_train - noise + + # decayed_noise (length, dim=1) + if (uv < 1).all(): + # all unvoiced + cyc_noise = torch.zeros_like(sine_wav) + else: + f0mean = f0s[uv > 0].mean() + + decayed_noise = self.noise_decay(beta, f0mean)[0, :, :] + # convolute + cyc_noise = self.l_conv(pure_pulse, decayed_noise) + + # add noise in invoiced segments + cyc_noise = cyc_noise + noise * (1.0 - uv) + return cyc_noise, pulse_train, sine_wav, uv, noise + + +class SineGen(torch.nn.Module): + """ Definition of sine generator + SineGen(samp_rate, harmonic_num = 0, + sine_amp = 0.1, noise_std = 0.003, + voiced_threshold = 0, + flag_for_pulse=False) + + samp_rate: sampling rate in Hz + harmonic_num: number of harmonic overtones 
(default 0) + sine_amp: amplitude of sine-wavefrom (default 0.1) + noise_std: std of Gaussian noise (default 0.003) + voiced_thoreshold: F0 threshold for U/V classification (default 0) + flag_for_pulse: this SinGen is used inside PulseGen (default False) + + Note: when flag_for_pulse is True, the first time step of a voiced + segment is always sin(np.pi) or cos(0) + """ + + def __init__(self, samp_rate, harmonic_num=0, + sine_amp=0.1, noise_std=0.003, + voiced_threshold=0, + flag_for_pulse=False): + super(SineGen, self).__init__() + self.sine_amp = sine_amp + self.noise_std = noise_std + self.harmonic_num = harmonic_num + self.dim = self.harmonic_num + 1 + self.sampling_rate = samp_rate + self.voiced_threshold = voiced_threshold + self.flag_for_pulse = flag_for_pulse + + def _f02uv(self, f0): + # generate uv signal + uv = torch.ones_like(f0) + uv = uv * (f0 > self.voiced_threshold) + return uv + + def _f02sine(self, f0_values): + """ f0_values: (batchsize, length, dim) + where dim indicates fundamental tone and overtones + """ + # convert to F0 in rad. The interger part n can be ignored + # because 2 * np.pi * n doesn't affect phase + rad_values = (f0_values / self.sampling_rate) % 1 + + # initial phase noise (no noise for fundamental component) + rand_ini = torch.rand(f0_values.shape[0], f0_values.shape[2], \ + device=f0_values.device) + rand_ini[:, 0] = 0 + rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini + + # instantanouse phase sine[t] = sin(2*pi \sum_i=1 ^{t} rad) + if not self.flag_for_pulse: + # for normal case + + # To prevent torch.cumsum numerical overflow, + # it is necessary to add -1 whenever \sum_k=1^n rad_value_k > 1. + # Buffer tmp_over_one_idx indicates the time step to add -1. + # This will not change F0 of sine because (x-1) * 2*pi = x * 2*pi + tmp_over_one = torch.cumsum(rad_values, 1) % 1 + tmp_over_one_idx = (tmp_over_one[:, 1:, :] - + tmp_over_one[:, :-1, :]) < 0 + cumsum_shift = torch.zeros_like(rad_values) + cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0 + + sines = torch.sin(torch.cumsum(rad_values + cumsum_shift, dim=1) + * 2 * np.pi) + else: + # If necessary, make sure that the first time step of every + # voiced segments is sin(pi) or cos(0) + # This is used for pulse-train generation + + # identify the last time step in unvoiced segments + uv = self._f02uv(f0_values) + uv_1 = torch.roll(uv, shifts=-1, dims=1) + uv_1[:, -1, :] = 1 + u_loc = (uv < 1) * (uv_1 > 0) + + # get the instantanouse phase + tmp_cumsum = torch.cumsum(rad_values, dim=1) + # different batch needs to be processed differently + for idx in range(f0_values.shape[0]): + temp_sum = tmp_cumsum[idx, u_loc[idx, :, 0], :] + temp_sum[1:, :] = temp_sum[1:, :] - temp_sum[0:-1, :] + # stores the accumulation of i.phase within + # each voiced segments + tmp_cumsum[idx, :, :] = 0 + tmp_cumsum[idx, u_loc[idx, :, 0], :] = temp_sum + + # rad_values - tmp_cumsum: remove the accumulation of i.phase + # within the previous voiced segment. 
+ i_phase = torch.cumsum(rad_values - tmp_cumsum, dim=1) + + # get the sines + sines = torch.cos(i_phase * 2 * np.pi) + return sines + + def forward(self, f0): + """ sine_tensor, uv = forward(f0) + input F0: tensor(batchsize=1, length, dim=1) + f0 for unvoiced steps should be 0 + output sine_tensor: tensor(batchsize=1, length, dim) + output uv: tensor(batchsize=1, length, 1) + """ + with torch.no_grad(): + f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, \ + device=f0.device) + # fundamental component + f0_buf[:, :, 0] = f0[:, :, 0] + for idx in np.arange(self.harmonic_num): + # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic + f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * (idx + 2) + + # generate sine waveforms + sine_waves = self._f02sine(f0_buf) * self.sine_amp + + # generate uv signal + # uv = torch.ones(f0.shape) + # uv = uv * (f0 > self.voiced_threshold) + uv = self._f02uv(f0) + + # noise: for unvoiced should be similar to sine_amp + # std = self.sine_amp/3 -> max value ~ self.sine_amp + # . for voiced regions is self.noise_std + noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3 + noise = noise_amp * torch.randn_like(sine_waves) + + # first: set the unvoiced part to 0 by uv + # then: additive noise + sine_waves = sine_waves * uv + noise + return sine_waves, uv, noise + + +class SourceModuleCycNoise_v1(torch.nn.Module): + """ SourceModuleCycNoise_v1 + SourceModule(sampling_rate, noise_std=0.003, voiced_threshod=0) + sampling_rate: sampling_rate in Hz + + noise_std: std of Gaussian noise (default: 0.003) + voiced_threshold: threshold to set U/V given F0 (default: 0) + + cyc, noise, uv = SourceModuleCycNoise_v1(F0_upsampled, beta) + F0_upsampled (batchsize, length, 1) + beta (1) + cyc (batchsize, length, 1) + noise (batchsize, length, 1) + uv (batchsize, length, 1) + """ + + def __init__(self, sampling_rate, noise_std=0.003, voiced_threshod=0): + super(SourceModuleCycNoise_v1, self).__init__() + self.sampling_rate = sampling_rate + self.noise_std = noise_std + self.l_cyc_gen = CyclicNoiseGen_v1(sampling_rate, noise_std, + voiced_threshod) + + def forward(self, f0_upsamped, beta): + """ + cyc, noise, uv = SourceModuleCycNoise_v1(F0, beta) + F0_upsampled (batchsize, length, 1) + beta (1) + cyc (batchsize, length, 1) + noise (batchsize, length, 1) + uv (batchsize, length, 1) + """ + # source for harmonic branch + cyc, pulse, sine, uv, add_noi = self.l_cyc_gen(f0_upsamped, beta) + + # source for noise branch, in the same shape as uv + noise = torch.randn_like(uv) * self.noise_std / 3 + return cyc, noise, uv + + +class SourceModuleHnNSF(torch.nn.Module): + """ SourceModule for hn-nsf + SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1, + add_noise_std=0.003, voiced_threshod=0) + sampling_rate: sampling_rate in Hz + harmonic_num: number of harmonic above F0 (default: 0) + sine_amp: amplitude of sine source signal (default: 0.1) + add_noise_std: std of additive Gaussian noise (default: 0.003) + note that amplitude of noise in unvoiced is decided + by sine_amp + voiced_threshold: threhold to set U/V given F0 (default: 0) + + Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) + F0_sampled (batchsize, length, 1) + Sine_source (batchsize, length, 1) + noise_source (batchsize, length 1) + uv (batchsize, length, 1) + """ + + def __init__(self, sampling_rate, harmonic_num=0, sine_amp=0.1, + add_noise_std=0.003, voiced_threshod=0): + super(SourceModuleHnNSF, self).__init__() + + self.sine_amp = sine_amp + self.noise_std = add_noise_std + + # to produce sine 
waveforms + self.l_sin_gen = SineGen(sampling_rate, harmonic_num, + sine_amp, add_noise_std, voiced_threshod) + + # to merge source harmonics into a single excitation + self.l_linear = torch.nn.Linear(harmonic_num + 1, 1) + self.l_tanh = torch.nn.Tanh() + + def forward(self, x): + """ + Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) + F0_sampled (batchsize, length, 1) + Sine_source (batchsize, length, 1) + noise_source (batchsize, length 1) + """ + # source for harmonic branch + sine_wavs, uv, _ = self.l_sin_gen(x) + sine_merge = self.l_tanh(self.l_linear(sine_wavs)) + + # source for noise branch, in the same shape as uv + noise = torch.randn_like(uv) * self.sine_amp / 3 + return sine_merge, noise, uv + + +if __name__ == '__main__': + source = SourceModuleCycNoise_v1(24000) + x = torch.randn(16, 25600, 1) + + diff --git a/text_to_speech/modules/vocoder/parallel_wavegan/optimizers/__init__.py b/text_to_speech/modules/vocoder/parallel_wavegan/optimizers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a0e0c5932838281e912079e5784d84d43444a61a --- /dev/null +++ b/text_to_speech/modules/vocoder/parallel_wavegan/optimizers/__init__.py @@ -0,0 +1,2 @@ +from torch.optim import * # NOQA +from .radam import * # NOQA diff --git a/text_to_speech/modules/vocoder/parallel_wavegan/optimizers/radam.py b/text_to_speech/modules/vocoder/parallel_wavegan/optimizers/radam.py new file mode 100644 index 0000000000000000000000000000000000000000..e805d7e34921bee436e1e7fd9e1f753c7609186b --- /dev/null +++ b/text_to_speech/modules/vocoder/parallel_wavegan/optimizers/radam.py @@ -0,0 +1,91 @@ +# -*- coding: utf-8 -*- + +"""RAdam optimizer. + +This code is drived from https://github.com/LiyuanLucasLiu/RAdam. +""" + +import math +import torch + +from torch.optim.optimizer import Optimizer + + +class RAdam(Optimizer): + """Rectified Adam optimizer.""" + + def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0): + """Initilize RAdam optimizer.""" + defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) + self.buffer = [[None, None, None] for ind in range(10)] + super(RAdam, self).__init__(params, defaults) + + def __setstate__(self, state): + """Set state.""" + super(RAdam, self).__setstate__(state) + + def step(self, closure=None): + """Run one step.""" + loss = None + if closure is not None: + loss = closure() + + for group in self.param_groups: + + for p in group['params']: + if p.grad is None: + continue + grad = p.grad.data.float() + if grad.is_sparse: + raise RuntimeError('RAdam does not support sparse gradients') + + p_data_fp32 = p.data.float() + + state = self.state[p] + + if len(state) == 0: + state['step'] = 0 + state['exp_avg'] = torch.zeros_like(p_data_fp32) + state['exp_avg_sq'] = torch.zeros_like(p_data_fp32) + else: + state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32) + state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32) + + exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] + beta1, beta2 = group['betas'] + + exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad) + exp_avg.mul_(beta1).add_(1 - beta1, grad) + + state['step'] += 1 + buffered = self.buffer[int(state['step'] % 10)] + if state['step'] == buffered[0]: + N_sma, step_size = buffered[1], buffered[2] + else: + buffered[0] = state['step'] + beta2_t = beta2 ** state['step'] + N_sma_max = 2 / (1 - beta2) - 1 + N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t) + buffered[1] = N_sma + + # more conservative since it's an 
approximated value + if N_sma >= 5: + step_size = math.sqrt( + (1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (N_sma_max - 2)) / (1 - beta1 ** state['step']) # NOQA + else: + step_size = 1.0 / (1 - beta1 ** state['step']) + buffered[2] = step_size + + if group['weight_decay'] != 0: + p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32) + + # more conservative since it's an approximated value + if N_sma >= 5: + denom = exp_avg_sq.sqrt().add_(group['eps']) + p_data_fp32.addcdiv_(-step_size * group['lr'], exp_avg, denom) + else: + p_data_fp32.add_(-step_size * group['lr'], exp_avg) + + p.data.copy_(p_data_fp32) + + return loss diff --git a/text_to_speech/modules/vocoder/parallel_wavegan/stft_loss.py b/text_to_speech/modules/vocoder/parallel_wavegan/stft_loss.py new file mode 100644 index 0000000000000000000000000000000000000000..729d694356d0353f3e3dd18730b82db55fc66550 --- /dev/null +++ b/text_to_speech/modules/vocoder/parallel_wavegan/stft_loss.py @@ -0,0 +1,102 @@ +# -*- coding: utf-8 -*- + +# Copyright 2019 Tomoki Hayashi +# MIT License (https://opensource.org/licenses/MIT) + +"""STFT-based Loss modules.""" +import librosa +import torch + +from text_to_speech.modules.vocoder.parallel_wavegan.losses import LogSTFTMagnitudeLoss, SpectralConvergengeLoss, stft + + +class STFTLoss(torch.nn.Module): + """STFT loss module.""" + + def __init__(self, fft_size=1024, shift_size=120, win_length=600, window="hann_window", + use_mel_loss=False): + """Initialize STFT loss module.""" + super(STFTLoss, self).__init__() + self.fft_size = fft_size + self.shift_size = shift_size + self.win_length = win_length + self.window = getattr(torch, window)(win_length) + self.spectral_convergenge_loss = SpectralConvergengeLoss() + self.log_stft_magnitude_loss = LogSTFTMagnitudeLoss() + self.use_mel_loss = use_mel_loss + self.mel_basis = None + + def forward(self, x, y): + """Calculate forward propagation. + + Args: + x (Tensor): Predicted signal (B, T). + y (Tensor): Groundtruth signal (B, T). + + Returns: + Tensor: Spectral convergence loss value. + Tensor: Log STFT magnitude loss value. + + """ + if self.window.device != x.device: + self.window = self.window.to(x.device) + x_mag = stft(x, self.fft_size, self.shift_size, self.win_length, self.window) + y_mag = stft(y, self.fft_size, self.shift_size, self.win_length, self.window) + if self.use_mel_loss: + if self.mel_basis is None: + self.mel_basis = torch.from_numpy(librosa.filters.mel(22050, self.fft_size, 80)).cuda().T + x_mag = x_mag @ self.mel_basis + y_mag = y_mag @ self.mel_basis + + sc_loss = self.spectral_convergenge_loss(x_mag, y_mag) + mag_loss = self.log_stft_magnitude_loss(x_mag, y_mag) + + return sc_loss, mag_loss + + +class MultiResolutionSTFTLoss(torch.nn.Module): + """Multi resolution STFT loss module.""" + + def __init__(self, + fft_sizes=[1024, 2048, 512], + hop_sizes=[120, 240, 50], + win_lengths=[600, 1200, 240], + window="hann_window", + use_mel_loss=False): + """Initialize Multi resolution STFT loss module. + + Args: + fft_sizes (list): List of FFT sizes. + hop_sizes (list): List of hop sizes. + win_lengths (list): List of window lengths. + window (str): Window function type. 
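+            use_mel_loss (bool): Whether to compute both losses on mel-filterbank-projected magnitudes.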
+ + """ + super(MultiResolutionSTFTLoss, self).__init__() + assert len(fft_sizes) == len(hop_sizes) == len(win_lengths) + self.stft_losses = torch.nn.ModuleList() + for fs, ss, wl in zip(fft_sizes, hop_sizes, win_lengths): + self.stft_losses += [STFTLoss(fs, ss, wl, window, use_mel_loss)] + + def forward(self, x, y): + """Calculate forward propagation. + + Args: + x (Tensor): Predicted signal (B, T). + y (Tensor): Groundtruth signal (B, T). + + Returns: + Tensor: Multi resolution spectral convergence loss value. + Tensor: Multi resolution log STFT magnitude loss value. + + """ + sc_loss = 0.0 + mag_loss = 0.0 + for f in self.stft_losses: + sc_l, mag_l = f(x, y) + sc_loss += sc_l + mag_loss += mag_l + sc_loss /= len(self.stft_losses) + mag_loss /= len(self.stft_losses) + + return sc_loss, mag_loss diff --git a/text_to_speech/modules/vocoder/parallel_wavegan/utils/__init__.py b/text_to_speech/modules/vocoder/parallel_wavegan/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e8fa95a020706b5412c3959fbf6e5980019c0d5f --- /dev/null +++ b/text_to_speech/modules/vocoder/parallel_wavegan/utils/__init__.py @@ -0,0 +1 @@ +from .utils import * # NOQA diff --git a/text_to_speech/modules/vocoder/parallel_wavegan/utils/utils.py b/text_to_speech/modules/vocoder/parallel_wavegan/utils/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..69fdc4cdb5d75b907c8b9372a9f2448a9a166730 --- /dev/null +++ b/text_to_speech/modules/vocoder/parallel_wavegan/utils/utils.py @@ -0,0 +1,171 @@ +# -*- coding: utf-8 -*- + +# Copyright 2019 Tomoki Hayashi +# MIT License (https://opensource.org/licenses/MIT) + +"""Utility functions.""" + +import fnmatch +import logging +import os +import sys +try: + import h5py +except: + pass +import numpy as np + + +def find_files(root_dir, query="*.wav", include_root_dir=True): + """Find files recursively. + + Args: + root_dir (str): Root root_dir to find. + query (str): Query to find. + include_root_dir (bool): If False, root_dir name is not included. + + Returns: + list: List of found filenames. + + """ + files = [] + for root, dirnames, filenames in os.walk(root_dir, followlinks=True): + for filename in fnmatch.filter(filenames, query): + files.append(os.path.join(root, filename)) + if not include_root_dir: + files = [file_.replace(root_dir + "/", "") for file_ in files] + + return files + + +def read_hdf5(hdf5_name, hdf5_path): + """Read hdf5 dataset. + + Args: + hdf5_name (str): Filename of hdf5 file. + hdf5_path (str): Dataset name in hdf5 file. + + Return: + any: Dataset values. + + """ + if not os.path.exists(hdf5_name): + logging.error(f"There is no such a hdf5 file ({hdf5_name}).") + sys.exit(1) + + hdf5_file = h5py.File(hdf5_name, "r") + + if hdf5_path not in hdf5_file: + logging.error(f"There is no such a data in hdf5 file. ({hdf5_path})") + sys.exit(1) + + hdf5_data = hdf5_file[hdf5_path][()] + hdf5_file.close() + + return hdf5_data + + +def write_hdf5(hdf5_name, hdf5_path, write_data, is_overwrite=True): + """Write dataset to hdf5. + + Args: + hdf5_name (str): Hdf5 dataset filename. + hdf5_path (str): Dataset path in hdf5. + write_data (ndarray): Data to write. + is_overwrite (bool): Whether to overwrite dataset. 
+ + """ + # convert to numpy array + write_data = np.array(write_data) + + # check folder existence + folder_name, _ = os.path.split(hdf5_name) + if not os.path.exists(folder_name) and len(folder_name) != 0: + os.makedirs(folder_name) + + # check hdf5 existence + if os.path.exists(hdf5_name): + # if already exists, open with r+ mode + hdf5_file = h5py.File(hdf5_name, "r+") + # check dataset existence + if hdf5_path in hdf5_file: + if is_overwrite: + logging.warning("Dataset in hdf5 file already exists. " + "recreate dataset in hdf5.") + hdf5_file.__delitem__(hdf5_path) + else: + logging.error("Dataset in hdf5 file already exists. " + "if you want to overwrite, please set is_overwrite = True.") + hdf5_file.close() + sys.exit(1) + else: + # if not exists, open with w mode + hdf5_file = h5py.File(hdf5_name, "w") + + # write data to hdf5 + hdf5_file.create_dataset(hdf5_path, data=write_data) + hdf5_file.flush() + hdf5_file.close() + + +class HDF5ScpLoader(object): + """Loader class for a fests.scp file of hdf5 file. + + Examples: + key1 /some/path/a.h5:feats + key2 /some/path/b.h5:feats + key3 /some/path/c.h5:feats + key4 /some/path/d.h5:feats + ... + >>> loader = HDF5ScpLoader("hdf5.scp") + >>> array = loader["key1"] + + key1 /some/path/a.h5 + key2 /some/path/b.h5 + key3 /some/path/c.h5 + key4 /some/path/d.h5 + ... + >>> loader = HDF5ScpLoader("hdf5.scp", "feats") + >>> array = loader["key1"] + + """ + + def __init__(self, feats_scp, default_hdf5_path="feats"): + """Initialize HDF5 scp loader. + + Args: + feats_scp (str): Kaldi-style feats.scp file with hdf5 format. + default_hdf5_path (str): Path in hdf5 file. If the scp contain the info, not used. + + """ + self.default_hdf5_path = default_hdf5_path + with open(feats_scp) as f: + lines = [line.replace("\n", "") for line in f.readlines()] + self.data = {} + for line in lines: + key, value = line.split() + self.data[key] = value + + def get_path(self, key): + """Get hdf5 file path for a given key.""" + return self.data[key] + + def __getitem__(self, key): + """Get ndarray for a given key.""" + p = self.data[key] + if ":" in p: + return read_hdf5(*p.split(":")) + else: + return read_hdf5(p, self.default_hdf5_path) + + def __len__(self): + """Return the length of the scp file.""" + return len(self.data) + + def __iter__(self): + """Return the iterator of the scp file.""" + return iter(self.data) + + def keys(self): + """Return the keys of the scp file.""" + return self.data.keys() diff --git a/text_to_speech/tasks/run.py b/text_to_speech/tasks/run.py new file mode 100644 index 0000000000000000000000000000000000000000..7778a333d2e9b53e28bab6f93f0abf1c3540a079 --- /dev/null +++ b/text_to_speech/tasks/run.py @@ -0,0 +1,19 @@ +import os + +os.environ["OMP_NUM_THREADS"] = "1" + +from text_to_speech.utils.commons.hparams import hparams, set_hparams +import importlib + + +def run_task(): + assert hparams['task_cls'] != '' + pkg = ".".join(hparams["task_cls"].split(".")[:-1]) + cls_name = hparams["task_cls"].split(".")[-1] + task_cls = getattr(importlib.import_module(pkg), cls_name) + task_cls.start() + + +if __name__ == '__main__': + set_hparams() + run_task() diff --git a/text_to_speech/tasks/tts/__pycache__/tts_utils.cpython-38.pyc b/text_to_speech/tasks/tts/__pycache__/tts_utils.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7ceeeedeb73bd5ec433f29d4966e04a9bc81d3d0 Binary files /dev/null and b/text_to_speech/tasks/tts/__pycache__/tts_utils.cpython-38.pyc differ diff --git 
a/text_to_speech/tasks/tts/dataset_utils.py b/text_to_speech/tasks/tts/dataset_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..a511ac10828ee6ae5e4813a367d433059a9a2b4d --- /dev/null +++ b/text_to_speech/tasks/tts/dataset_utils.py @@ -0,0 +1,311 @@ +import torch.optim +import torch.utils.data +import numpy as np +import torch +import torch.optim +import torch.utils.data +import torch.distributions +from text_to_speech.utils.audio.pitch.utils import norm_interp_f0, denorm_f0 +from text_to_speech.utils.commons.dataset_utils import BaseDataset, collate_1d_or_2d +from text_to_speech.utils.commons.indexed_datasets import IndexedDataset +from text_to_speech.utils.commons.hparams import hparams +import random + + +class BaseSpeechDataset(BaseDataset): + def __init__(self, prefix, shuffle=False, items=None, data_dir=None): + super().__init__(shuffle) + from text_to_speech.utils.commons.hparams import hparams + self.data_dir = hparams['binary_data_dir'] if data_dir is None else data_dir + self.prefix = prefix + self.hparams = hparams + self.indexed_ds = None + if items is not None: + self.indexed_ds = items + self.sizes = [1] * len(items) + self.avail_idxs = list(range(len(self.sizes))) + else: + self.sizes = np.load(f'{self.data_dir}/{self.prefix}_lengths.npy') + if prefix == 'test' and len(hparams['test_ids']) > 0: + self.avail_idxs = hparams['test_ids'] + else: + self.avail_idxs = list(range(len(self.sizes))) + if prefix == 'train' and hparams['min_frames'] > 0: + self.avail_idxs = [x for x in self.avail_idxs if self.sizes[x] >= hparams['min_frames']] + try: + self.sizes = [self.sizes[i] for i in self.avail_idxs] + except: + tmp_sizes = [] + for i in self.avail_idxs: + try: + tmp_sizes.append(self.sizes[i]) + except: + continue + self.sizes = tmp_sizes + + def _get_item(self, index): + if hasattr(self, 'avail_idxs') and self.avail_idxs is not None: + index = self.avail_idxs[index] + if self.indexed_ds is None: + self.indexed_ds = IndexedDataset(f'{self.data_dir}/{self.prefix}') + return self.indexed_ds[index] + + def __getitem__(self, index): + hparams = self.hparams + item = self._get_item(index) + assert len(item['mel']) == self.sizes[index], (len(item['mel']), self.sizes[index]) + max_frames = hparams['max_frames'] + spec = torch.Tensor(item['mel'])[:max_frames] + max_frames = spec.shape[0] // hparams['frames_multiple'] * hparams['frames_multiple'] + spec = spec[:max_frames] + ph_token = torch.LongTensor(item['ph_token'][:hparams['max_input_tokens']]) + sample = { + "id": index, + "item_name": item['item_name'], + "text": item['txt'], + "txt_token": ph_token, + "mel": spec, + "mel_nonpadding": spec.abs().sum(-1) > 0, + } + if hparams['use_spk_embed']: + sample["spk_embed"] = torch.Tensor(item['spk_embed']) + if hparams['use_spk_id']: + sample["spk_id"] = int(item['spk_id']) + return sample + + def collater(self, samples): + if len(samples) == 0: + return {} + hparams = self.hparams + ids = [s['id'] for s in samples] + item_names = [s['item_name'] for s in samples] + text = [s['text'] for s in samples] + txt_tokens = collate_1d_or_2d([s['txt_token'] for s in samples], 0) + mels = collate_1d_or_2d([s['mel'] for s in samples], 0.0) + txt_lengths = torch.LongTensor([s['txt_token'].numel() for s in samples]) + mel_lengths = torch.LongTensor([s['mel'].shape[0] for s in samples]) + + batch = { + 'id': ids, + 'item_name': item_names, + 'nsamples': len(samples), + 'text': text, + 'txt_tokens': txt_tokens, + 'txt_lengths': txt_lengths, + 'mels': mels, + 'mel_lengths': 
mel_lengths, + } + + if hparams['use_spk_embed']: + spk_embed = torch.stack([s['spk_embed'] for s in samples]) + batch['spk_embed'] = spk_embed + if hparams['use_spk_id']: + spk_ids = torch.LongTensor([s['spk_id'] for s in samples]) + batch['spk_ids'] = spk_ids + return batch + + +class FastSpeechDataset(BaseSpeechDataset): + def __getitem__(self, index): + sample = super(FastSpeechDataset, self).__getitem__(index) + item = self._get_item(index) + hparams = self.hparams + mel = sample['mel'] + T = mel.shape[0] + ph_token = sample['txt_token'] + sample['mel2ph'] = mel2ph = torch.LongTensor(item['mel2ph'])[:T] + if hparams['use_pitch_embed']: + assert 'f0' in item + pitch = torch.LongTensor(item.get(hparams.get('pitch_key', 'pitch')))[:T] + f0, uv = norm_interp_f0(item["f0"][:T]) + uv = torch.FloatTensor(uv) + f0 = torch.FloatTensor(f0) + if hparams['pitch_type'] == 'ph': + if "f0_ph" in item: + f0 = torch.FloatTensor(item['f0_ph']) + else: + f0 = denorm_f0(f0, None) + f0_phlevel_sum = torch.zeros_like(ph_token).float().scatter_add(0, mel2ph - 1, f0) + f0_phlevel_num = torch.zeros_like(ph_token).float().scatter_add( + 0, mel2ph - 1, torch.ones_like(f0)).clamp_min(1) + f0_ph = f0_phlevel_sum / f0_phlevel_num + f0, uv = norm_interp_f0(f0_ph) + else: + f0, uv, pitch = None, None, None + sample["f0"], sample["uv"], sample["pitch"] = f0, uv, pitch + return sample + + def collater(self, samples): + if len(samples) == 0: + return {} + batch = super(FastSpeechDataset, self).collater(samples) + hparams = self.hparams + if hparams['use_pitch_embed']: + f0 = collate_1d_or_2d([s['f0'] for s in samples], 0.0) + pitch = collate_1d_or_2d([s['pitch'] for s in samples]) + uv = collate_1d_or_2d([s['uv'] for s in samples]) + else: + f0, uv, pitch = None, None, None + mel2ph = collate_1d_or_2d([s['mel2ph'] for s in samples], 0.0) + batch.update({ + 'mel2ph': mel2ph, + 'pitch': pitch, + 'f0': f0, + 'uv': uv, + }) + return batch + +class FastSpeechWordDataset(FastSpeechDataset): + def __init__(self, prefix, shuffle=False, items=None, data_dir=None): + super().__init__(prefix, shuffle, items, data_dir) + # BERT contrastive loss & mlm loss + # from transformers import AutoTokenizer + # if hparams['ds_name'] in ['ljspeech', 'libritts']: + # self.tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased') + # elif hparams['ds_name'] == 'biaobei': + # self.tokenizer = AutoTokenizer.from_pretrained('bert-base-chinese') + # else: + # raise NotImplementedError() + # self.mlm_probability = 0.15 + # if hparams.get("cl_ds_name") is None: + # pass + # elif hparams['cl_ds_name'] == "wiki": + # from experimental_yerfor.simcse_datasets import WikiDataset + # self.cl_dataset = WikiDataset(prefix=prefix) + # shuffle = True if prefix == 'train' else False + # endless = True + # num_workers = None if prefix == 'train' else 0 + # self.cl_dataloader = self.cl_dataset.build_dataloader(shuffle=shuffle, max_tokens=hparams.get("cl_max_tokens", 3200), + # max_sentences=hparams.get("cl_max_sentences", 64), endless=endless, num_workers=num_workers) + # self.cl_dl_iter = iter(self.cl_dataloader) + # elif hparams['cl_ds_name'] == "nli": + # from experimental_yerfor.simcse_datasets import NLIDataset + # self.cl_dataset = NLIDataset(prefix=prefix) + # shuffle = True if prefix == 'train' else False + # endless = True + # num_workers = None if prefix == 'train' else 0 + # self.cl_dataloader = self.cl_dataset.build_dataloader(shuffle=shuffle, max_tokens=hparams.get("cl_max_tokens", 4800), + # max_sentences=hparams.get("cl_max_sentences", 128), 
endless=endless, num_workers=num_workers) + # self.cl_dl_iter = iter(self.cl_dataloader) + + def __getitem__(self, index): + sample = super().__getitem__(index) + item = self._get_item(index) + max_frames = sample['mel'].shape[0] + if 'word' in item: + sample['words'] = item['word'] + sample["ph_words"] = item["ph_gb_word"] + sample["word_tokens"] = torch.LongTensor(item["word_token"]) + else: + sample['words'] = item['words'] + sample["ph_words"] = " ".join(item["ph_words"]) + sample["word_tokens"] = torch.LongTensor(item["word_tokens"]) + sample["mel2word"] = torch.LongTensor(item.get("mel2word"))[:max_frames] + sample["ph2word"] = torch.LongTensor(item['ph2word'][:self.hparams['max_input_tokens']]) + + # SyntaSpeech related features + # sample['dgl_graph'] = item['dgl_graph'] + # sample['edge_types'] = item['edge_types'] + + # BERT related features + # sample['bert_token'] = item['bert_token'] + # sample['bert_input_ids'] = torch.LongTensor(item['bert_input_ids']) + # sample['bert_token2word'] = torch.LongTensor(item['bert_token2word']) + # sample['bert_attention_mask'] = torch.LongTensor(item['bert_attention_mask']) + # sample['bert_token_type_ids'] = torch.LongTensor(item['bert_token_type_ids']) + + return sample + + def collater(self, samples): + samples = [s for s in samples if s is not None] + batch = super().collater(samples) + ph_words = [s['ph_words'] for s in samples] + batch['ph_words'] = ph_words + word_tokens = collate_1d_or_2d([s['word_tokens'] for s in samples], 0) + batch['word_tokens'] = word_tokens + mel2word = collate_1d_or_2d([s['mel2word'] for s in samples], 0) + batch['mel2word'] = mel2word + ph2word = collate_1d_or_2d([s['ph2word'] for s in samples], 0) + batch['ph2word'] = ph2word + batch['words'] = [s['words'] for s in samples] + batch['word_lengths'] = torch.LongTensor([len(s['word_tokens']) for s in samples]) + if self.hparams['use_word_input']: # always False + batch['txt_tokens'] = batch['word_tokens'] + batch['txt_lengths'] = torch.LongTensor([s['word_tokens'].numel() for s in samples]) + batch['mel2ph'] = batch['mel2word'] + + # SyntaSpeech + # graph_lst, etypes_lst = [], [] # new features for Graph-based SDP + # for s in samples: + # graph_lst.append(s['dgl_graph']) + # etypes_lst.append(s['edge_types']) + # batch.update({ + # 'graph_lst': graph_lst, + # 'etypes_lst': etypes_lst, + # }) + + # BERT + # batch['bert_feats'] = {} + # batch['bert_feats']['bert_tokens'] = [s['bert_token'] for s in samples] + # bert_input_ids = collate_1d_or_2d([s['bert_input_ids'] for s in samples], 0) + # batch['bert_feats']['bert_input_ids'] = bert_input_ids + # bert_token2word = collate_1d_or_2d([s['bert_token2word'] for s in samples], 0) + # batch['bert_feats']['bert_token2word'] = bert_token2word + # bert_attention_mask = collate_1d_or_2d([s['bert_attention_mask'] for s in samples], 0) + # batch['bert_feats']['bert_attention_mask'] = bert_attention_mask + # bert_token_type_ids = collate_1d_or_2d([s['bert_token_type_ids'] for s in samples], 0) + # batch['bert_feats']['bert_token_type_ids'] = bert_token_type_ids + + # BERT contrastive loss & mlm loss & electra loss + # if hparams.get("cl_ds_name") is None: + # batch['cl_feats'] = {} + # batch['cl_feats']['cl_input_ids'] = batch['bert_feats']['bert_input_ids'].unsqueeze(1).repeat([1,2,1]) + # batch['cl_feats']['cl_token2word'] = batch['bert_feats']['bert_token2word'].unsqueeze(1).repeat([1,2,1]) + # batch['cl_feats']['cl_attention_mask'] = batch['bert_feats']['bert_attention_mask'].unsqueeze(1).repeat([1,2,1]) + # 
batch['cl_feats']['cl_token_type_ids'] = batch['bert_feats']['bert_token_type_ids'].unsqueeze(1).repeat([1,2,1]) + # bs, _, t = batch['cl_feats']['cl_input_ids'].shape + # mlm_input_ids, mlm_labels = self.mask_tokens(batch['bert_feats']['bert_input_ids'].reshape([bs, t])) + # batch['cl_feats']["mlm_input_ids"] = mlm_input_ids.reshape([bs, t]) + # batch['cl_feats']["mlm_labels"] = mlm_labels.reshape([bs, t]) + # batch['cl_feats']["mlm_attention_mask"] = batch['bert_feats']['bert_attention_mask'] + # elif hparams['cl_ds_name'] in ["wiki", "nli"]: + # try: + # cl_feats = self.cl_dl_iter.__next__() + # except: + # self.cl_dl_iter = iter(self.cl_dataloader) + # cl_feats = self.cl_dl_iter.__next__() + # batch['cl_feats'] = cl_feats + return batch + + # def mask_tokens(self, inputs, special_tokens_mask=None): + # """ + # Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. + # """ + # inputs = inputs.clone() + # labels = inputs.clone() + # # We sample a few tokens in each sequence for MLM training (with probability `self.mlm_probability`) + # probability_matrix = torch.full(labels.shape, self.mlm_probability) + # if special_tokens_mask is None: + # special_tokens_mask = [ + # self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist() + # ] + # special_tokens_mask = torch.tensor(special_tokens_mask, dtype=torch.bool) + # else: + # special_tokens_mask = special_tokens_mask.bool() + + # probability_matrix.masked_fill_(special_tokens_mask, value=0.0) + # masked_indices = torch.bernoulli(probability_matrix).bool() + # labels[~masked_indices] = -100 # We only compute loss on masked tokens + + # # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK]) + # indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices + # inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token) + + # # 10% of the time, we replace masked input tokens with random word + # indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced + # random_words = torch.randint(len(self.tokenizer), labels.shape, dtype=torch.long) + # inputs[indices_random] = random_words[indices_random] + + # # The rest of the time (10% of the time) we keep the masked input tokens unchanged + # return inputs, labels + diff --git a/text_to_speech/tasks/tts/diffspeech.py b/text_to_speech/tasks/tts/diffspeech.py new file mode 100644 index 0000000000000000000000000000000000000000..db30b30b1c38d4bf624c950c9799e695ec2de8ba --- /dev/null +++ b/text_to_speech/tasks/tts/diffspeech.py @@ -0,0 +1,111 @@ +import torch + +from text_to_speech.modules.tts.diffspeech.shallow_diffusion_tts import GaussianDiffusion +from tasks.tts.fs2_orig import FastSpeech2OrigTask + +import utils +from text_to_speech.utils.commons.hparams import hparams +from text_to_speech.utils.commons.ckpt_utils import load_ckpt +from text_to_speech.utils.audio.pitch.utils import denorm_f0 + + +class DiffSpeechTask(FastSpeech2OrigTask): + def build_tts_model(self): + # get min and max + # import torch + # from tqdm import tqdm + # v_min = torch.ones([80]) * 100 + # v_max = torch.ones([80]) * -100 + # for i, ds in enumerate(tqdm(self.dataset_cls('train'))): + # v_max = torch.max(torch.max(ds['mel'].reshape(-1, 80), 0)[0], v_max) + # v_min = torch.min(torch.min(ds['mel'].reshape(-1, 80), 0)[0], v_min) + # if i % 100 == 0: + # print(i, v_min, v_max) + # print('final', v_min, 
v_max) + dict_size = len(self.token_encoder) + self.model = GaussianDiffusion(dict_size, hparams) + if hparams['fs2_ckpt'] != '': + load_ckpt(self.model.fs2, hparams['fs2_ckpt'], 'model', strict=True) + # for k, v in self.model.fs2.named_parameters(): + # if 'predictor' not in k: + # v.requires_grad = False + # or + for k, v in self.model.fs2.named_parameters(): + v.requires_grad = False + + def build_optimizer(self, model): + self.optimizer = optimizer = torch.optim.AdamW( + filter(lambda p: p.requires_grad, model.parameters()), + lr=hparams['lr'], + betas=(hparams['optimizer_adam_beta1'], hparams['optimizer_adam_beta2']), + weight_decay=hparams['weight_decay']) + return optimizer + + def build_scheduler(self, optimizer): + return torch.optim.lr_scheduler.StepLR(optimizer, hparams['decay_steps'], gamma=0.5) + + def run_model(self, sample, infer=False, *args, **kwargs): + txt_tokens = sample['txt_tokens'] # [B, T_t] + spk_embed = sample.get('spk_embed') + spk_id = sample.get('spk_ids') + if not infer: + target = sample['mels'] # [B, T_s, 80] + mel2ph = sample['mel2ph'] # [B, T_s] + f0 = sample.get('f0') + uv = sample.get('uv') + output = self.model(txt_tokens, mel2ph=mel2ph, spk_embed=spk_embed, spk_id=spk_id, + ref_mels=target, f0=f0, uv=uv, infer=False) + losses = {} + if 'diff_loss' in output: + losses['mel'] = output['diff_loss'] + self.add_dur_loss(output['dur'], mel2ph, txt_tokens, losses=losses) + if hparams['use_pitch_embed']: + self.add_pitch_loss(output, sample, losses) + return losses, output + else: + use_gt_dur = kwargs.get('infer_use_gt_dur', hparams['use_gt_dur']) + use_gt_f0 = kwargs.get('infer_use_gt_f0', hparams['use_gt_f0']) + mel2ph, uv, f0 = None, None, None + if use_gt_dur: + mel2ph = sample['mel2ph'] + if use_gt_f0: + f0 = sample['f0'] + uv = sample['uv'] + output = self.model(txt_tokens, mel2ph=mel2ph, spk_embed=spk_embed, spk_id=spk_id, + ref_mels=None, f0=f0, uv=uv, infer=True) + return output + + def save_valid_result(self, sample, batch_idx, model_out): + sr = hparams['audio_sample_rate'] + f0_gt = None + # mel_out = model_out['mel_out'] + if sample.get('f0') is not None: + f0_gt = denorm_f0(sample['f0'][0].cpu(), sample['uv'][0].cpu()) + # self.plot_mel(batch_idx, sample['mels'], mel_out, f0s=f0_gt) + if self.global_step > 0: + # wav_pred = self.vocoder.spec2wav(mel_out[0].cpu(), f0=f0_gt) + # self.logger.add_audio(f'wav_val_{batch_idx}', wav_pred, self.global_step, sr) + # with gt duration + model_out = self.run_model(sample, infer=True, infer_use_gt_dur=True) + dur_info = self.get_plot_dur_info(sample, model_out) + del dur_info['dur_pred'] + wav_pred = self.vocoder.spec2wav(model_out['mel_out'][0].cpu(), f0=f0_gt) + self.logger.add_audio(f'wav_gdur_{batch_idx}', wav_pred, self.global_step, sr) + self.plot_mel(batch_idx, sample['mels'], model_out['mel_out'][0], f'diffmel_gdur_{batch_idx}', + dur_info=dur_info, f0s=f0_gt) + self.plot_mel(batch_idx, sample['mels'], model_out['fs2_mel'][0], f'fs2mel_gdur_{batch_idx}', + dur_info=dur_info, f0s=f0_gt) # gt mel vs. 
fs2 mel + + # with pred duration + if not hparams['use_gt_dur']: + model_out = self.run_model(sample, infer=True, infer_use_gt_dur=False) + dur_info = self.get_plot_dur_info(sample, model_out) + self.plot_mel(batch_idx, sample['mels'], model_out['mel_out'][0], f'mel_pdur_{batch_idx}', + dur_info=dur_info, f0s=f0_gt) + wav_pred = self.vocoder.spec2wav(model_out['mel_out'][0].cpu(), f0=f0_gt) + self.logger.add_audio(f'wav_pdur_{batch_idx}', wav_pred, self.global_step, sr) + # gt wav + if self.global_step <= hparams['valid_infer_interval']: + mel_gt = sample['mels'][0].cpu() + wav_gt = self.vocoder.spec2wav(mel_gt, f0=f0_gt) + self.logger.add_audio(f'wav_gt_{batch_idx}', wav_gt, self.global_step, sr) diff --git a/text_to_speech/tasks/tts/fs.py b/text_to_speech/tasks/tts/fs.py new file mode 100644 index 0000000000000000000000000000000000000000..295e35083df2a056ca71c171fef732958be12685 --- /dev/null +++ b/text_to_speech/tasks/tts/fs.py @@ -0,0 +1,196 @@ +import torch +import torch.distributions +import torch.nn.functional as F +import torch.optim +import torch.utils.data + +from text_to_speech.modules.tts.fs import FastSpeech +from tasks.tts.dataset_utils import FastSpeechWordDataset +from tasks.tts.speech_base import SpeechBaseTask +from text_to_speech.utils.audio.align import mel2token_to_dur +from text_to_speech.utils.audio.pitch.utils import denorm_f0 +from text_to_speech.utils.commons.hparams import hparams + + +class FastSpeechTask(SpeechBaseTask): + def __init__(self): + super().__init__() + self.dataset_cls = FastSpeechWordDataset + self.sil_ph = self.token_encoder.sil_phonemes() + + def build_tts_model(self): + dict_size = len(self.token_encoder) + self.model = FastSpeech(dict_size, hparams) + + def run_model(self, sample, infer=False, *args, **kwargs): + txt_tokens = sample['txt_tokens'] # [B, T_t] + spk_embed = sample.get('spk_embed') + spk_id = sample.get('spk_ids') + if not infer: + target = sample['mels'] # [B, T_s, 80] + mel2ph = sample['mel2ph'] # [B, T_s] + f0 = sample.get('f0') + uv = sample.get('uv') + output = self.model(txt_tokens, mel2ph=mel2ph, spk_embed=spk_embed, spk_id=spk_id, + f0=f0, uv=uv, infer=False, + ph2word=sample['ph2word'], + graph_lst=sample.get('graph_lst'), + etypes_lst=sample.get('etypes_lst'), + bert_feats=sample.get("bert_feats"), + cl_feats=sample.get("cl_feats") + ) + losses = {} + self.add_mel_loss(output['mel_out'], target, losses) + self.add_dur_loss(output['dur'], mel2ph, txt_tokens, losses=losses) + if hparams['use_pitch_embed']: + self.add_pitch_loss(output, sample, losses) + return losses, output + else: + use_gt_dur = kwargs.get('infer_use_gt_dur', hparams['use_gt_dur']) + use_gt_f0 = kwargs.get('infer_use_gt_f0', hparams['use_gt_f0']) + mel2ph, uv, f0 = None, None, None + if use_gt_dur: + mel2ph = sample['mel2ph'] + if use_gt_f0: + f0 = sample['f0'] + uv = sample['uv'] + output = self.model(txt_tokens, mel2ph=mel2ph, spk_embed=spk_embed, spk_id=spk_id, + f0=f0, uv=uv, infer=True, + ph2word=sample['ph2word'], + graph_lst=sample.get('graph_lst'), + etypes_lst=sample.get('etypes_lst'), + bert_feats=sample.get("bert_feats"), + cl_feats=sample.get("cl_feats") + ) + return output + + def add_dur_loss(self, dur_pred, mel2ph, txt_tokens, losses=None): + """ + + :param dur_pred: [B, T], float, log scale + :param mel2ph: [B, T] + :param txt_tokens: [B, T] + :param losses: + :return: + """ + B, T = txt_tokens.shape + nonpadding = (txt_tokens != 0).float() + dur_gt = mel2token_to_dur(mel2ph, T).float() * nonpadding + is_sil = 
torch.zeros_like(txt_tokens).bool() + for p in self.sil_ph: + is_sil = is_sil | (txt_tokens == self.token_encoder.encode(p)[0]) + is_sil = is_sil.float() # [B, T_txt] + losses['pdur'] = F.mse_loss((dur_pred + 1).log(), (dur_gt + 1).log(), reduction='none') + losses['pdur'] = (losses['pdur'] * nonpadding).sum() / nonpadding.sum() + losses['pdur'] = losses['pdur'] * hparams['lambda_ph_dur'] + # use linear scale for sentence and word duration + if hparams['lambda_word_dur'] > 0: + word_id = (is_sil.cumsum(-1) * (1 - is_sil)).long() + word_dur_p = dur_pred.new_zeros([B, word_id.max() + 1]).scatter_add(1, word_id, dur_pred)[:, 1:] + word_dur_g = dur_gt.new_zeros([B, word_id.max() + 1]).scatter_add(1, word_id, dur_gt)[:, 1:] + wdur_loss = F.mse_loss((word_dur_p + 1).log(), (word_dur_g + 1).log(), reduction='none') + word_nonpadding = (word_dur_g > 0).float() + wdur_loss = (wdur_loss * word_nonpadding).sum() / word_nonpadding.sum() + losses['wdur'] = wdur_loss * hparams['lambda_word_dur'] + if hparams['lambda_sent_dur'] > 0: + sent_dur_p = dur_pred.sum(-1) + sent_dur_g = dur_gt.sum(-1) + sdur_loss = F.mse_loss((sent_dur_p + 1).log(), (sent_dur_g + 1).log(), reduction='mean') + losses['sdur'] = sdur_loss.mean() * hparams['lambda_sent_dur'] + + def add_pitch_loss(self, output, sample, losses): + mel2ph = sample['mel2ph'] # [B, T_s] + f0 = sample['f0'] + uv = sample['uv'] + nonpadding = (mel2ph != 0).float() if hparams['pitch_type'] == 'frame' \ + else (sample['txt_tokens'] != 0).float() + p_pred = output['pitch_pred'] + assert p_pred[..., 0].shape == f0.shape + if hparams['use_uv'] and hparams['pitch_type'] == 'frame': + assert p_pred[..., 1].shape == uv.shape, (p_pred.shape, uv.shape) + losses['uv'] = (F.binary_cross_entropy_with_logits( + p_pred[:, :, 1], uv, reduction='none') * nonpadding).sum() \ + / nonpadding.sum() * hparams['lambda_uv'] + nonpadding = nonpadding * (uv == 0).float() + f0_pred = p_pred[:, :, 0] + losses['f0'] = (F.l1_loss(f0_pred, f0, reduction='none') * nonpadding).sum() \ + / nonpadding.sum() * hparams['lambda_f0'] + + def save_valid_result(self, sample, batch_idx, model_out): + sr = hparams['audio_sample_rate'] + f0_gt = None + mel_out = model_out['mel_out'] + if sample.get('f0') is not None: + f0_gt = denorm_f0(sample['f0'][0].cpu(), sample['uv'][0].cpu()) + self.plot_mel(batch_idx, sample['mels'], mel_out, f0s=f0_gt) + if self.global_step > 0: + wav_pred = self.vocoder.spec2wav(mel_out[0].cpu(), f0=f0_gt) + self.logger.add_audio(f'wav_val_{batch_idx}', wav_pred, self.global_step, sr) + # with gt duration + model_out = self.run_model(sample, infer=True, infer_use_gt_dur=True) + dur_info = self.get_plot_dur_info(sample, model_out) + del dur_info['dur_pred'] + wav_pred = self.vocoder.spec2wav(model_out['mel_out'][0].cpu(), f0=f0_gt) + self.logger.add_audio(f'wav_gdur_{batch_idx}', wav_pred, self.global_step, sr) + self.plot_mel(batch_idx, sample['mels'], model_out['mel_out'][0], f'mel_gdur_{batch_idx}', + dur_info=dur_info, f0s=f0_gt) + + # with pred duration + if not hparams['use_gt_dur']: + model_out = self.run_model(sample, infer=True, infer_use_gt_dur=False) + dur_info = self.get_plot_dur_info(sample, model_out) + self.plot_mel(batch_idx, sample['mels'], model_out['mel_out'][0], f'mel_pdur_{batch_idx}', + dur_info=dur_info, f0s=f0_gt) + wav_pred = self.vocoder.spec2wav(model_out['mel_out'][0].cpu(), f0=f0_gt) + self.logger.add_audio(f'wav_pdur_{batch_idx}', wav_pred, self.global_step, sr) + # gt wav + if self.global_step <= hparams['valid_infer_interval']: + mel_gt = 
sample['mels'][0].cpu() + wav_gt = self.vocoder.spec2wav(mel_gt, f0=f0_gt) + self.logger.add_audio(f'wav_gt_{batch_idx}', wav_gt, self.global_step, sr) + + def get_plot_dur_info(self, sample, model_out): + T_txt = sample['txt_tokens'].shape[1] + dur_gt = mel2token_to_dur(sample['mel2ph'], T_txt)[0] + dur_pred = model_out['dur'] if 'dur' in model_out else dur_gt + txt = self.token_encoder.decode(sample['txt_tokens'][0].cpu().numpy()) + txt = txt.split(" ") + return {'dur_gt': dur_gt, 'dur_pred': dur_pred, 'txt': txt} + + def test_step(self, sample, batch_idx): + """ + + :param sample: + :param batch_idx: + :return: + """ + assert sample['txt_tokens'].shape[0] == 1, 'only support batch_size=1 in inference' + outputs = self.run_model(sample, infer=True) + text = sample['text'][0] + item_name = sample['item_name'][0] + tokens = sample['txt_tokens'][0].cpu().numpy() + mel_gt = sample['mels'][0].cpu().numpy() + mel_pred = outputs['mel_out'][0].cpu().numpy() + mel2ph = sample['mel2ph'][0].cpu().numpy() + mel2ph_pred = outputs['mel2ph'][0].cpu().numpy() + str_phs = self.token_encoder.decode(tokens, strip_padding=True) + base_fn = f'[{batch_idx:06d}][{item_name.replace("%", "_")}][%s]' + if text is not None: + base_fn += text.replace(":", "$3A")[:80] + base_fn = base_fn.replace(' ', '_') + gen_dir = self.gen_dir + wav_pred = self.vocoder.spec2wav(mel_pred) + self.saving_result_pool.add_job(self.save_result, args=[ + wav_pred, mel_pred, base_fn % 'P', gen_dir, str_phs, mel2ph_pred]) + if hparams['save_gt']: + wav_gt = self.vocoder.spec2wav(mel_gt) + self.saving_result_pool.add_job(self.save_result, args=[ + wav_gt, mel_gt, base_fn % 'G', gen_dir, str_phs, mel2ph]) + print(f"Pred_shape: {mel_pred.shape}, gt_shape: {mel_gt.shape}") + return { + 'item_name': item_name, + 'text': text, + 'ph_tokens': self.token_encoder.decode(tokens.tolist()), + 'wav_fn_pred': base_fn % 'P', + 'wav_fn_gt': base_fn % 'G', + } diff --git a/text_to_speech/tasks/tts/fs2_orig.py b/text_to_speech/tasks/tts/fs2_orig.py new file mode 100644 index 0000000000000000000000000000000000000000..40526806e3f86a90e156ed8a372b0d1a9807fca8 --- /dev/null +++ b/text_to_speech/tasks/tts/fs2_orig.py @@ -0,0 +1,138 @@ +import torch +import torch.nn.functional as F +from text_to_speech.modules.tts.fs2_orig import FastSpeech2Orig +from tasks.tts.dataset_utils import FastSpeechDataset +from tasks.tts.fs import FastSpeechTask +from text_to_speech.utils.commons.dataset_utils import collate_1d, collate_2d +from text_to_speech.utils.commons.hparams import hparams +from text_to_speech.utils.plot.plot import spec_to_figure +import numpy as np + + +class FastSpeech2OrigDataset(FastSpeechDataset): + def __init__(self, prefix, shuffle=False, items=None, data_dir=None): + super().__init__(prefix, shuffle, items, data_dir) + self.pitch_type = hparams.get('pitch_type') + + def __getitem__(self, index): + sample = super().__getitem__(index) + item = self._get_item(index) + hparams = self.hparams + mel = sample['mel'] + T = mel.shape[0] + sample['energy'] = (mel.exp() ** 2).sum(-1).sqrt() + if hparams['use_pitch_embed'] and self.pitch_type == 'cwt': + cwt_spec = torch.Tensor(item['cwt_spec'])[:T] + f0_mean = item.get('f0_mean', item.get('cwt_mean')) + f0_std = item.get('f0_std', item.get('cwt_std')) + sample.update({"cwt_spec": cwt_spec, "f0_mean": f0_mean, "f0_std": f0_std}) + return sample + + def collater(self, samples): + if len(samples) == 0: + return {} + batch = super().collater(samples) + if hparams['use_pitch_embed']: + energy = collate_1d([s['energy'] 
for s in samples], 0.0) + else: + energy = None + batch.update({'energy': energy}) + if self.pitch_type == 'cwt': + cwt_spec = collate_2d([s['cwt_spec'] for s in samples]) + f0_mean = torch.Tensor([s['f0_mean'] for s in samples]) + f0_std = torch.Tensor([s['f0_std'] for s in samples]) + batch.update({'cwt_spec': cwt_spec, 'f0_mean': f0_mean, 'f0_std': f0_std}) + return batch + + +class FastSpeech2OrigTask(FastSpeechTask): + def __init__(self): + super(FastSpeech2OrigTask, self).__init__() + self.dataset_cls = FastSpeech2OrigDataset + + def build_tts_model(self): + dict_size = len(self.token_encoder) + self.model = FastSpeech2Orig(dict_size, hparams) + + def run_model(self, sample, infer=False, *args, **kwargs): + txt_tokens = sample['txt_tokens'] # [B, T_t] + spk_embed = sample.get('spk_embed') + spk_id = sample.get('spk_ids') + if not infer: + target = sample['mels'] # [B, T_s, 80] + mel2ph = sample['mel2ph'] # [B, T_s] + f0 = sample.get('f0') + uv = sample.get('uv') + energy = sample.get('energy') + output = self.model(txt_tokens, mel2ph=mel2ph, spk_embed=spk_embed, spk_id=spk_id, + f0=f0, uv=uv, energy=energy, infer=False) + losses = {} + self.add_mel_loss(output['mel_out'], target, losses) + self.add_dur_loss(output['dur'], mel2ph, txt_tokens, losses=losses) + if hparams['use_pitch_embed']: + self.add_pitch_loss(output, sample, losses) + if hparams['use_energy_embed']: + self.add_energy_loss(output, sample, losses) + return losses, output + else: + mel2ph, uv, f0, energy = None, None, None, None + use_gt_dur = kwargs.get('infer_use_gt_dur', hparams['use_gt_dur']) + use_gt_f0 = kwargs.get('infer_use_gt_f0', hparams['use_gt_f0']) + use_gt_energy = kwargs.get('infer_use_gt_energy', hparams['use_gt_energy']) + if use_gt_dur: + mel2ph = sample['mel2ph'] + if use_gt_f0: + f0 = sample['f0'] + uv = sample['uv'] + if use_gt_energy: + energy = sample['energy'] + output = self.model(txt_tokens, mel2ph=mel2ph, spk_embed=spk_embed, spk_id=spk_id, + f0=f0, uv=uv, energy=energy, infer=True) + return output + + def save_valid_result(self, sample, batch_idx, model_out): + super(FastSpeech2OrigTask, self).save_valid_result(sample, batch_idx, model_out) + self.plot_cwt(batch_idx, model_out['cwt'], sample['cwt_spec']) + + def plot_cwt(self, batch_idx, cwt_out, cwt_gt=None): + if len(cwt_out.shape) == 3: + cwt_out = cwt_out[0] + if isinstance(cwt_out, torch.Tensor): + cwt_out = cwt_out.cpu().numpy() + if cwt_gt is not None: + if len(cwt_gt.shape) == 3: + cwt_gt = cwt_gt[0] + if isinstance(cwt_gt, torch.Tensor): + cwt_gt = cwt_gt.cpu().numpy() + cwt_out = np.concatenate([cwt_out, cwt_gt], -1) + name = f'cwt_val_{batch_idx}' + self.logger.add_figure(name, spec_to_figure(cwt_out), self.global_step) + + def add_pitch_loss(self, output, sample, losses): + if hparams['pitch_type'] == 'cwt': + cwt_spec = sample[f'cwt_spec'] + f0_mean = sample['f0_mean'] + uv = sample['uv'] + mel2ph = sample['mel2ph'] + f0_std = sample['f0_std'] + cwt_pred = output['cwt'][:, :, :10] + f0_mean_pred = output['f0_mean'] + f0_std_pred = output['f0_std'] + nonpadding = (mel2ph != 0).float() + losses['C'] = F.l1_loss(cwt_pred, cwt_spec) * hparams['lambda_f0'] + if hparams['use_uv']: + assert output['cwt'].shape[-1] == 11 + uv_pred = output['cwt'][:, :, -1] + losses['uv'] = (F.binary_cross_entropy_with_logits(uv_pred, uv, reduction='none') + * nonpadding).sum() / nonpadding.sum() * hparams['lambda_uv'] + losses['f0_mean'] = F.l1_loss(f0_mean_pred, f0_mean) * hparams['lambda_f0'] + losses['f0_std'] = F.l1_loss(f0_std_pred, f0_std) * 
hparams['lambda_f0'] + else: + super(FastSpeech2OrigTask, self).add_pitch_loss(output, sample, losses) + + def add_energy_loss(self, output, sample, losses): + energy_pred, energy = output['energy_pred'], sample['energy'] + nonpadding = (energy != 0).float() + loss = (F.mse_loss(energy_pred, energy, reduction='none') * nonpadding).sum() / nonpadding.sum() + loss = loss * hparams['lambda_energy'] + losses['e'] = loss diff --git a/text_to_speech/tasks/tts/fs_adv.py b/text_to_speech/tasks/tts/fs_adv.py new file mode 100644 index 0000000000000000000000000000000000000000..af360054fb3184bc49338660c8b537db0426e168 --- /dev/null +++ b/text_to_speech/tasks/tts/fs_adv.py @@ -0,0 +1,260 @@ +import os +import torch +import torch.nn.functional as F +import torch.nn as nn +import numpy as np + +from text_to_speech.modules.tts.syntaspeech.multi_window_disc import Discriminator +from tasks.tts.fs import FastSpeechTask +from text_to_speech.modules.tts.fs import FastSpeech + +from text_to_speech.utils.audio.align import mel2token_to_dur +from text_to_speech.utils.commons.hparams import hparams +from text_to_speech.utils.nn.model_utils import num_params +from text_to_speech.utils.commons.tensor_utils import tensors_to_scalars +from text_to_speech.utils.audio.pitch.utils import denorm_f0, norm_f0 +from text_to_speech.utils.audio.pitch_extractors import get_pitch +from text_to_speech.utils.metrics.dtw import dtw as DTW + +from text_to_speech.utils.plot.plot import spec_to_figure +from text_to_speech.utils.text.text_encoder import build_token_encoder + + +class FastSpeechAdvTask(FastSpeechTask): + def __init__(self): + super().__init__() + self.build_disc_model() + self.mse_loss_fn = torch.nn.MSELoss() + + def build_tts_model(self): + dict_size = len(self.token_encoder) + self.model = FastSpeech(dict_size, hparams) + self.gen_params = [p for p in self.model.parameters() if p.requires_grad] + self.dp_params = [p for k, p in self.model.named_parameters() if (('dur_predictor' in k) and p.requires_grad)] + self.gen_params_except_dp = [p for k, p in self.model.named_parameters() if (('dur_predictor' not in k) and p.requires_grad)] + self.bert_params = [p for k, p in self.model.named_parameters() if (('bert' in k) and p.requires_grad)] + self.gen_params_except_bert_and_dp = [p for k, p in self.model.named_parameters() if ('dur_predictor' not in k) and ('bert' not in k) and p.requires_grad ] + self.use_bert = True if len(self.bert_params) > 0 else False + + + def build_disc_model(self): + disc_win_num = hparams['disc_win_num'] + h = hparams['mel_disc_hidden_size'] + self.mel_disc = Discriminator( + time_lengths=[32, 64, 128][:disc_win_num], + freq_length=80, hidden_size=h, kernel=(3, 3) + ) + self.disc_params = list(self.mel_disc.parameters()) + + def _training_step(self, sample, batch_idx, optimizer_idx): + loss_output = {} + loss_weights = {} + disc_start = self.global_step >= hparams["disc_start_steps"] and hparams['lambda_mel_adv'] > 0 + if optimizer_idx == 0: + ####################### + # Generator # + ####################### + loss_output, model_out = self.run_model(sample, infer=False) + self.model_out_gt = self.model_out = \ + {k: v.detach() for k, v in model_out.items() if isinstance(v, torch.Tensor)} + if disc_start: + mel_p = model_out['mel_out'] + if hasattr(self.model, 'out2mel'): + mel_p = self.model.out2mel(mel_p) + o_ = self.mel_disc(mel_p) + p_, pc_ = o_['y'], o_['y_c'] + if p_ is not None: + loss_output['a'] = self.mse_loss_fn(p_, p_.new_ones(p_.size())) + loss_weights['a'] = 
hparams['lambda_mel_adv'] + if pc_ is not None: + loss_output['ac'] = self.mse_loss_fn(pc_, pc_.new_ones(pc_.size())) + loss_weights['ac'] = hparams['lambda_mel_adv'] + else: + ####################### + # Discriminator # + ####################### + if disc_start and self.global_step % hparams['disc_interval'] == 0: + model_out = self.model_out_gt + mel_g = sample['mels'] + mel_p = model_out['mel_out'] + o = self.mel_disc(mel_g) + p, pc = o['y'], o['y_c'] + o_ = self.mel_disc(mel_p) + p_, pc_ = o_['y'], o_['y_c'] + if p_ is not None: + loss_output["r"] = self.mse_loss_fn(p, p.new_ones(p.size())) + loss_output["f"] = self.mse_loss_fn(p_, p_.new_zeros(p_.size())) + if pc_ is not None: + loss_output["rc"] = self.mse_loss_fn(pc, pc.new_ones(pc.size())) + loss_output["fc"] = self.mse_loss_fn(pc_, pc_.new_zeros(pc_.size())) + else: + return None + total_loss = sum([loss_weights.get(k, 1) * v for k, v in loss_output.items() if isinstance(v, torch.Tensor) and v.requires_grad]) + loss_output['batch_size'] = sample['txt_tokens'].size()[0] + return total_loss, loss_output + + + def validation_step(self, sample, batch_idx): + outputs = {} + outputs['losses'] = {} + outputs['losses'], model_out = self.run_model(sample) + outputs['total_loss'] = sum(outputs['losses'].values()) + outputs['nsamples'] = sample['nsamples'] + outputs = tensors_to_scalars(outputs) + if self.global_step % hparams['valid_infer_interval'] == 0 \ + and batch_idx < hparams['num_valid_plots']: + valid_results = self.save_valid_result(sample, batch_idx, model_out) + wav_gt = valid_results['wav_gt'] + mel_gt = valid_results['mel_gt'] + wav_pred = valid_results['wav_pred'] + mel_pred = valid_results['mel_pred'] + f0_pred_, _ = get_pitch(wav_pred, mel_pred, hparams) + f0_gt_, _ = get_pitch(wav_gt, mel_gt, hparams) + manhattan_distance = lambda x, y: np.abs(x - y) + dist, cost, acc, path = DTW(f0_pred_, f0_gt_, manhattan_distance) + outputs['losses']['f0_dtw'] = dist / len(f0_gt_) + return outputs + + def save_valid_result(self, sample, batch_idx, model_out): + sr = hparams['audio_sample_rate'] + f0_gt = None + mel_out = model_out['mel_out'] + if sample.get('f0') is not None: + f0_gt = denorm_f0(sample['f0'][0].cpu(), sample['uv'][0].cpu()) + self.plot_mel(batch_idx, sample['mels'], mel_out, f0s=f0_gt) + + # if self.global_step > 0: + wav_pred = self.vocoder.spec2wav(mel_out[0].cpu(), f0=f0_gt) + self.logger.add_audio(f'wav_val_{batch_idx}', wav_pred, self.global_step, sr) + # with gt duration + model_out = self.run_model(sample, infer=True, infer_use_gt_dur=True) + dur_info = self.get_plot_dur_info(sample, model_out) + del dur_info['dur_pred'] + wav_pred = self.vocoder.spec2wav(model_out['mel_out'][0].cpu(), f0=f0_gt) + self.logger.add_audio(f'wav_gdur_{batch_idx}', wav_pred, self.global_step, sr) + self.plot_mel(batch_idx, sample['mels'], model_out['mel_out'][0], f'mel_gdur_{batch_idx}', + dur_info=dur_info, f0s=f0_gt) + + # with pred duration + if not hparams['use_gt_dur']: + model_out = self.run_model(sample, infer=True, infer_use_gt_dur=False) + dur_info = self.get_plot_dur_info(sample, model_out) + self.plot_mel(batch_idx, sample['mels'], model_out['mel_out'][0], f'mel_pdur_{batch_idx}', + dur_info=dur_info, f0s=f0_gt) + wav_pred = self.vocoder.spec2wav(model_out['mel_out'][0].cpu(), f0=f0_gt) + self.logger.add_audio(f'wav_pdur_{batch_idx}', wav_pred, self.global_step, sr) + # gt wav + mel_gt = sample['mels'][0].cpu() + wav_gt = self.vocoder.spec2wav(mel_gt, f0=f0_gt) + if self.global_step <= hparams['valid_infer_interval']: + 
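+                    # ground-truth audio never changes, so it is logged only during the first validation interval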
self.logger.add_audio(f'wav_gt_{batch_idx}', wav_gt, self.global_step, sr) + + # add attn plot + # if self.global_step > 0 and hparams['dur_level'] == 'word': + # self.logger.add_figure(f'attn_{batch_idx}', spec_to_figure(model_out['attn'][0]), self.global_step) + + return {'wav_gt': wav_gt, 'wav_pred': wav_pred, 'mel_gt': mel_gt, 'mel_pred': model_out['mel_out'][0].cpu()} + + + def get_plot_dur_info(self, sample, model_out): + # if hparams['dur_level'] == 'word': + # T_txt = sample['word_lengths'].max() + # dur_gt = mel2token_to_dur(sample['mel2word'], T_txt)[0] + # dur_pred = model_out['dur'] if 'dur' in model_out else dur_gt + # txt = sample['ph_words'][0].split(" ") + # else: + T_txt = sample['txt_tokens'].shape[1] + dur_gt = mel2token_to_dur(sample['mel2ph'], T_txt)[0] + dur_pred = model_out['dur'] if 'dur' in model_out else dur_gt + txt = self.token_encoder.decode(sample['txt_tokens'][0].cpu().numpy()) + txt = txt.split(" ") + return {'dur_gt': dur_gt, 'dur_pred': dur_pred, 'txt': txt} + + def build_optimizer(self, model): + + optimizer_gen = torch.optim.AdamW( + self.gen_params, + lr=hparams['lr'], + betas=(hparams['optimizer_adam_beta1'], hparams['optimizer_adam_beta2']), + weight_decay=hparams['weight_decay']) + + optimizer_disc = torch.optim.AdamW( + self.disc_params, + lr=hparams['disc_lr'], + betas=(hparams['optimizer_adam_beta1'], hparams['optimizer_adam_beta2']), + **hparams["discriminator_optimizer_params"]) if len(self.disc_params) > 0 else None + + return [optimizer_gen, optimizer_disc] + + def build_scheduler(self, optimizer): + return [ + FastSpeechTask.build_scheduler(self, optimizer[0]), # Generator Scheduler + torch.optim.lr_scheduler.StepLR(optimizer=optimizer[1], # Discriminator Scheduler + **hparams["discriminator_scheduler_params"]), + ] + + def on_before_optimization(self, opt_idx): + if opt_idx == 0: + nn.utils.clip_grad_norm_(self.dp_params, hparams['clip_grad_norm']) + if self.use_bert: + nn.utils.clip_grad_norm_(self.bert_params, hparams['clip_grad_norm']) + nn.utils.clip_grad_norm_(self.gen_params_except_bert_and_dp, hparams['clip_grad_norm']) + else: + nn.utils.clip_grad_norm_(self.gen_params_except_dp, hparams['clip_grad_norm']) + else: + nn.utils.clip_grad_norm_(self.disc_params, hparams["clip_grad_norm"]) + + def on_after_optimization(self, epoch, batch_idx, optimizer, optimizer_idx): + if self.scheduler is not None: + self.scheduler[0].step(self.global_step // hparams['accumulate_grad_batches']) + self.scheduler[1].step(self.global_step // hparams['accumulate_grad_batches']) + + ############ + # infer + ############ + def test_start(self): + super().test_start() + if hparams.get('save_attn', False): + os.makedirs(f'{self.gen_dir}/attn', exist_ok=True) + self.model.store_inverse_all() + + def test_step(self, sample, batch_idx): + assert sample['txt_tokens'].shape[0] == 1, 'only support batch_size=1 in inference' + outputs = self.run_model(sample, infer=True) + text = sample['text'][0] + item_name = sample['item_name'][0] + tokens = sample['txt_tokens'][0].cpu().numpy() + mel_gt = sample['mels'][0].cpu().numpy() + mel_pred = outputs['mel_out'][0].cpu().numpy() + mel2ph = sample['mel2ph'][0].cpu().numpy() + mel2ph_pred = None + str_phs = self.token_encoder.decode(tokens, strip_padding=True) + base_fn = f'[{batch_idx:06d}][{item_name.replace("%", "_")}][%s]' + if text is not None: + base_fn += text.replace(":", "$3A")[:80] + base_fn = base_fn.replace(' ', '_') + gen_dir = self.gen_dir + wav_pred = self.vocoder.spec2wav(mel_pred) + 
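+        # hand the predicted wav/mel to the result-saving pool; ground truth is queued below when save_gt is set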
self.saving_result_pool.add_job(self.save_result, args=[ + wav_pred, mel_pred, base_fn % 'P', gen_dir, str_phs, mel2ph_pred]) + if hparams['save_gt']: + wav_gt = self.vocoder.spec2wav(mel_gt) + self.saving_result_pool.add_job(self.save_result, args=[ + wav_gt, mel_gt, base_fn % 'G', gen_dir, str_phs, mel2ph]) + if hparams.get('save_attn', False): + attn = outputs['attn'][0].cpu().numpy() + np.save(f'{gen_dir}/attn/{item_name}.npy', attn) + # save f0 for pitch dtw + f0_pred_, _ = get_pitch(wav_pred, mel_pred, hparams) + f0_gt_, _ = get_pitch(wav_gt, mel_gt, hparams) + np.save(f'{gen_dir}/f0/{item_name}.npy', f0_pred_) + np.save(f'{gen_dir}/f0/{item_name}_gt.npy', f0_gt_) + + print(f"Pred_shape: {mel_pred.shape}, gt_shape: {mel_gt.shape}") + return { + 'item_name': item_name, + 'text': text, + 'ph_tokens': self.token_encoder.decode(tokens.tolist()), + 'wav_fn_pred': base_fn % 'P', + 'wav_fn_gt': base_fn % 'G', + } diff --git a/text_to_speech/tasks/tts/fs_adv_aishell.py b/text_to_speech/tasks/tts/fs_adv_aishell.py new file mode 100644 index 0000000000000000000000000000000000000000..9956f8211ceaec80e42761caea5aa2d5160a4fac --- /dev/null +++ b/text_to_speech/tasks/tts/fs_adv_aishell.py @@ -0,0 +1,267 @@ +import os +import torch +import torch.nn.functional as F +import torch.nn as nn +import numpy as np + +from text_to_speech.modules.tts.syntaspeech.multi_window_disc import Discriminator +from tasks.tts.fs import FastSpeechTask +from text_to_speech.modules.tts.fs import FastSpeech + +from text_to_speech.utils.audio.align import mel2token_to_dur +from text_to_speech.utils.commons.hparams import hparams +from text_to_speech.utils.nn.model_utils import num_params +from text_to_speech.utils.commons.tensor_utils import tensors_to_scalars +from text_to_speech.utils.audio.pitch.utils import denorm_f0, norm_f0 +from text_to_speech.utils.audio.pitch_extractors import get_pitch +from text_to_speech.utils.metrics.dtw import dtw as DTW + +from text_to_speech.utils.plot.plot import spec_to_figure +from text_to_speech.utils.text.text_encoder import build_token_encoder + + +class FastSpeechAdvTask(FastSpeechTask): + def __init__(self): + super().__init__() + self.build_disc_model() + self.mse_loss_fn = torch.nn.MSELoss() + + def build_tts_model(self): + dict_size = len(self.token_encoder) + self.model = FastSpeech(dict_size, hparams) + self.gen_params = [p for p in self.model.parameters() if p.requires_grad] + self.dp_params = [p for k, p in self.model.named_parameters() if (('dur_predictor' in k) and p.requires_grad)] + self.gen_params_except_dp = [p for k, p in self.model.named_parameters() if (('dur_predictor' not in k) and p.requires_grad)] + self.bert_params = [p for k, p in self.model.named_parameters() if (('bert' in k) and p.requires_grad)] + self.gen_params_except_bert_and_dp = [p for k, p in self.model.named_parameters() if ('dur_predictor' not in k) and ('bert' not in k) and p.requires_grad ] + self.use_bert = True if len(self.bert_params) > 0 else False + + + def build_disc_model(self): + disc_win_num = hparams['disc_win_num'] + h = hparams['mel_disc_hidden_size'] + self.mel_disc = Discriminator( + time_lengths=[32, 64, 128][:disc_win_num], + freq_length=80, hidden_size=h, kernel=(3, 3) + ) + self.disc_params = list(self.mel_disc.parameters()) + + def _training_step(self, sample, batch_idx, optimizer_idx): + loss_output = {} + loss_weights = {} + disc_start = self.global_step >= hparams["disc_start_steps"] and hparams['lambda_mel_adv'] > 0 + if optimizer_idx == 0: + ####################### + # 
Generator # + ####################### + loss_output, model_out = self.run_model(sample, infer=False) + self.model_out_gt = self.model_out = \ + {k: v.detach() for k, v in model_out.items() if isinstance(v, torch.Tensor)} + if disc_start: + mel_p = model_out['mel_out'] + if hasattr(self.model, 'out2mel'): + mel_p = self.model.out2mel(mel_p) + o_ = self.mel_disc(mel_p) + p_, pc_ = o_['y'], o_['y_c'] + if p_ is not None: + loss_output['a'] = self.mse_loss_fn(p_, p_.new_ones(p_.size())) + loss_weights['a'] = hparams['lambda_mel_adv'] + if pc_ is not None: + loss_output['ac'] = self.mse_loss_fn(pc_, pc_.new_ones(pc_.size())) + loss_weights['ac'] = hparams['lambda_mel_adv'] + else: + ####################### + # Discriminator # + ####################### + if disc_start and self.global_step % hparams['disc_interval'] == 0: + model_out = self.model_out_gt + mel_g = sample['mels'] + mel_p = model_out['mel_out'] + o = self.mel_disc(mel_g) + p, pc = o['y'], o['y_c'] + o_ = self.mel_disc(mel_p) + p_, pc_ = o_['y'], o_['y_c'] + if p_ is not None: + loss_output["r"] = self.mse_loss_fn(p, p.new_ones(p.size())) + loss_output["f"] = self.mse_loss_fn(p_, p_.new_zeros(p_.size())) + if pc_ is not None: + loss_output["rc"] = self.mse_loss_fn(pc, pc.new_ones(pc.size())) + loss_output["fc"] = self.mse_loss_fn(pc_, pc_.new_zeros(pc_.size())) + else: + return None + total_loss = sum([loss_weights.get(k, 1) * v for k, v in loss_output.items() if isinstance(v, torch.Tensor) and v.requires_grad]) + loss_output['batch_size'] = sample['txt_tokens'].size()[0] + return total_loss, loss_output + + def validation_start(self): + self.vocoder = None + + def validation_step(self, sample, batch_idx): + outputs = {} + outputs['losses'] = {} + outputs['losses'], model_out = self.run_model(sample) + outputs['total_loss'] = sum(outputs['losses'].values()) + outputs['nsamples'] = sample['nsamples'] + outputs = tensors_to_scalars(outputs) + if self.global_step % hparams['valid_infer_interval'] == 0 \ + and batch_idx < hparams['num_valid_plots']: + valid_results = self.save_valid_result(sample, batch_idx, model_out) + # wav_gt = valid_results['wav_gt'] + mel_gt = valid_results['mel_gt'] + # wav_pred = valid_results['wav_pred'] + mel_pred = valid_results['mel_pred'] + # f0_pred_, _ = get_pitch(wav_pred, mel_pred, hparams) + # f0_gt_, _ = get_pitch(wav_gt, mel_gt, hparams) + # manhattan_distance = lambda x, y: np.abs(x - y) + # dist, cost, acc, path = DTW(f0_pred_, f0_gt_, manhattan_distance) + # outputs['losses']['f0_dtw'] = dist / len(f0_gt_) + return outputs + + def save_valid_result(self, sample, batch_idx, model_out): + sr = hparams['audio_sample_rate'] + f0_gt = None + mel_out = model_out['mel_out'] + if sample.get('f0') is not None: + f0_gt = denorm_f0(sample['f0'][0].cpu(), sample['uv'][0].cpu()) + self.plot_mel(batch_idx, sample['mels'], mel_out, f0s=f0_gt) + + # if self.global_step > 0: + if self.vocoder is not None: + wav_pred = self.vocoder.spec2wav(mel_out[0].cpu(), f0=f0_gt) + self.logger.add_audio(f'wav_val_{batch_idx}', wav_pred, self.global_step, sr) + # with gt duration + model_out = self.run_model(sample, infer=True, infer_use_gt_dur=True) + dur_info = self.get_plot_dur_info(sample, model_out) + del dur_info['dur_pred'] + if self.vocoder is not None: + wav_pred = self.vocoder.spec2wav(model_out['mel_out'][0].cpu(), f0=f0_gt) + self.logger.add_audio(f'wav_gdur_{batch_idx}', wav_pred, self.global_step, sr) + self.plot_mel(batch_idx, sample['mels'], model_out['mel_out'][0], f'mel_gdur_{batch_idx}', + dur_info=dur_info, 
f0s=f0_gt) + + # with pred duration + if not hparams['use_gt_dur']: + model_out = self.run_model(sample, infer=True, infer_use_gt_dur=False) + dur_info = self.get_plot_dur_info(sample, model_out) + self.plot_mel(batch_idx, sample['mels'], model_out['mel_out'][0], f'mel_pdur_{batch_idx}', + dur_info=dur_info, f0s=f0_gt) + if self.vocoder is not None: + wav_pred = self.vocoder.spec2wav(model_out['mel_out'][0].cpu(), f0=f0_gt) + self.logger.add_audio(f'wav_pdur_{batch_idx}', wav_pred, self.global_step, sr) + # gt wav + mel_gt = sample['mels'][0].cpu() + if self.vocoder is not None: + wav_gt = self.vocoder.spec2wav(mel_gt, f0=f0_gt) + if self.global_step <= hparams['valid_infer_interval']: + self.logger.add_audio(f'wav_gt_{batch_idx}', wav_gt, self.global_step, sr) + + # add attn plot + # if self.global_step > 0 and hparams['dur_level'] == 'word': + # self.logger.add_figure(f'attn_{batch_idx}', spec_to_figure(model_out['attn'][0]), self.global_step) + + return {'mel_gt': mel_gt, 'mel_pred': model_out['mel_out'][0].cpu()} + # return {'wav_gt': wav_gt, 'wav_pred': wav_pred, 'mel_gt': mel_gt, 'mel_pred': model_out['mel_out'][0].cpu()} + + + def get_plot_dur_info(self, sample, model_out): + # if hparams['dur_level'] == 'word': + # T_txt = sample['word_lengths'].max() + # dur_gt = mel2token_to_dur(sample['mel2word'], T_txt)[0] + # dur_pred = model_out['dur'] if 'dur' in model_out else dur_gt + # txt = sample['ph_words'][0].split(" ") + # else: + T_txt = sample['txt_tokens'].shape[1] + dur_gt = mel2token_to_dur(sample['mel2ph'], T_txt)[0] + dur_pred = model_out['dur'] if 'dur' in model_out else dur_gt + txt = self.token_encoder.decode(sample['txt_tokens'][0].cpu().numpy()) + txt = txt.split(" ") + return {'dur_gt': dur_gt, 'dur_pred': dur_pred, 'txt': txt} + + def build_optimizer(self, model): + + optimizer_gen = torch.optim.AdamW( + self.gen_params, + lr=hparams['lr'], + betas=(hparams['optimizer_adam_beta1'], hparams['optimizer_adam_beta2']), + weight_decay=hparams['weight_decay']) + + optimizer_disc = torch.optim.AdamW( + self.disc_params, + lr=hparams['disc_lr'], + betas=(hparams['optimizer_adam_beta1'], hparams['optimizer_adam_beta2']), + **hparams["discriminator_optimizer_params"]) if len(self.disc_params) > 0 else None + + return [optimizer_gen, optimizer_disc] + + def build_scheduler(self, optimizer): + return [ + FastSpeechTask.build_scheduler(self, optimizer[0]), # Generator Scheduler + torch.optim.lr_scheduler.StepLR(optimizer=optimizer[1], # Discriminator Scheduler + **hparams["discriminator_scheduler_params"]), + ] + + def on_before_optimization(self, opt_idx): + if opt_idx == 0: + nn.utils.clip_grad_norm_(self.dp_params, hparams['clip_grad_norm']) + if self.use_bert: + nn.utils.clip_grad_norm_(self.bert_params, hparams['clip_grad_norm']) + nn.utils.clip_grad_norm_(self.gen_params_except_bert_and_dp, hparams['clip_grad_norm']) + else: + nn.utils.clip_grad_norm_(self.gen_params_except_dp, hparams['clip_grad_norm']) + else: + nn.utils.clip_grad_norm_(self.disc_params, hparams["clip_grad_norm"]) + + def on_after_optimization(self, epoch, batch_idx, optimizer, optimizer_idx): + if self.scheduler is not None: + self.scheduler[0].step(self.global_step // hparams['accumulate_grad_batches']) + self.scheduler[1].step(self.global_step // hparams['accumulate_grad_batches']) + + ############ + # infer + ############ + def test_start(self): + super().test_start() + if hparams.get('save_attn', False): + os.makedirs(f'{self.gen_dir}/attn', exist_ok=True) + self.model.store_inverse_all() + + def 
test_step(self, sample, batch_idx): + assert sample['txt_tokens'].shape[0] == 1, 'only support batch_size=1 in inference' + outputs = self.run_model(sample, infer=True) + text = sample['text'][0] + item_name = sample['item_name'][0] + tokens = sample['txt_tokens'][0].cpu().numpy() + mel_gt = sample['mels'][0].cpu().numpy() + mel_pred = outputs['mel_out'][0].cpu().numpy() + mel2ph = sample['mel2ph'][0].cpu().numpy() + mel2ph_pred = None + str_phs = self.token_encoder.decode(tokens, strip_padding=True) + base_fn = f'[{batch_idx:06d}][{item_name.replace("%", "_")}][%s]' + if text is not None: + base_fn += text.replace(":", "$3A")[:80] + base_fn = base_fn.replace(' ', '_') + gen_dir = self.gen_dir + wav_pred = self.vocoder.spec2wav(mel_pred) + self.saving_result_pool.add_job(self.save_result, args=[ + wav_pred, mel_pred, base_fn % 'P', gen_dir, str_phs, mel2ph_pred]) + if hparams['save_gt']: + wav_gt = self.vocoder.spec2wav(mel_gt) + self.saving_result_pool.add_job(self.save_result, args=[ + wav_gt, mel_gt, base_fn % 'G', gen_dir, str_phs, mel2ph]) + if hparams.get('save_attn', False): + attn = outputs['attn'][0].cpu().numpy() + np.save(f'{gen_dir}/attn/{item_name}.npy', attn) + # save f0 for pitch dtw + f0_pred_, _ = get_pitch(wav_pred, mel_pred, hparams) + f0_gt_, _ = get_pitch(wav_gt, mel_gt, hparams) + np.save(f'{gen_dir}/f0/{item_name}.npy', f0_pred_) + np.save(f'{gen_dir}/f0/{item_name}_gt.npy', f0_gt_) + + print(f"Pred_shape: {mel_pred.shape}, gt_shape: {mel_gt.shape}") + return { + 'item_name': item_name, + 'text': text, + 'ph_tokens': self.token_encoder.decode(tokens.tolist()), + 'wav_fn_pred': base_fn % 'P', + 'wav_fn_gt': base_fn % 'G', + } diff --git a/text_to_speech/tasks/tts/ps.py b/text_to_speech/tasks/tts/ps.py new file mode 100644 index 0000000000000000000000000000000000000000..454a3a3016555f6ab20920890f9949c3671d4c8c --- /dev/null +++ b/text_to_speech/tasks/tts/ps.py @@ -0,0 +1,194 @@ +import os +import torch +import torch.nn.functional as F +from torch import nn + +from text_to_speech.modules.tts.portaspeech.portaspeech import PortaSpeech +from tasks.tts.fs import FastSpeechTask +from text_to_speech.utils.audio.align import mel2token_to_dur +from text_to_speech.utils.commons.hparams import hparams +from text_to_speech.utils.metrics.diagonal_metrics import get_focus_rate, get_phone_coverage_rate, get_diagonal_focus_rate +from text_to_speech.utils.nn.model_utils import num_params +import numpy as np + +from text_to_speech.utils.plot.plot import spec_to_figure +from text_to_speech.utils.text.text_encoder import build_token_encoder + + +class PortaSpeechTask(FastSpeechTask): + def __init__(self): + super().__init__() + data_dir = hparams['binary_data_dir'] + self.word_encoder = build_token_encoder(f'{data_dir}/word_set.json') + + def build_tts_model(self): + ph_dict_size = len(self.token_encoder) + word_dict_size = len(self.word_encoder) + self.model = PortaSpeech(ph_dict_size, word_dict_size, hparams) + + def on_train_start(self): + super().on_train_start() + for n, m in self.model.named_children(): + num_params(m, model_name=n) + if hasattr(self.model, 'fvae'): + for n, m in self.model.fvae.named_children(): + num_params(m, model_name=f'fvae.{n}') + + def run_model(self, sample, infer=False, *args, **kwargs): + txt_tokens = sample['txt_tokens'] + word_tokens = sample['word_tokens'] + spk_embed = sample.get('spk_embed') + spk_id = sample.get('spk_ids') + if not infer: + output = self.model(txt_tokens, word_tokens, + ph2word=sample['ph2word'], + mel2word=sample['mel2word'], + 
mel2ph=sample['mel2ph'], + word_len=sample['word_lengths'].max(), + tgt_mels=sample['mels'], + pitch=sample.get('pitch'), + spk_embed=spk_embed, + spk_id=spk_id, + infer=False, + global_step=self.global_step) + losses = {} + losses['kl_v'] = output['kl'].detach() + losses_kl = output['kl'] + losses_kl = torch.clamp(losses_kl, min=hparams['kl_min']) + losses_kl = min(self.global_step / hparams['kl_start_steps'], 1) * losses_kl + losses_kl = losses_kl * hparams['lambda_kl'] + losses['kl'] = losses_kl + self.add_mel_loss(output['mel_out'], sample['mels'], losses) + if hparams['dur_level'] == 'word': + self.add_dur_loss( + output['dur'], sample['mel2word'], sample['word_lengths'], sample['txt_tokens'], losses) + self.get_attn_stats(output['attn'], sample, losses) + else: + super(PortaSpeechTask, self).add_dur_loss(output['dur'], sample['mel2ph'], sample['txt_tokens'], losses) + return losses, output + else: + use_gt_dur = kwargs.get('infer_use_gt_dur', hparams['use_gt_dur']) + output = self.model( + txt_tokens, word_tokens, + ph2word=sample['ph2word'], + word_len=sample['word_lengths'].max(), + pitch=sample.get('pitch'), + mel2ph=sample['mel2ph'] if use_gt_dur else None, + mel2word=sample['mel2word'] if use_gt_dur else None, + tgt_mels=sample['mels'], + infer=True, + spk_embed=spk_embed, + spk_id=spk_id, + ) + return output + + def add_dur_loss(self, dur_pred, mel2token, word_len, txt_tokens, losses=None): + T = word_len.max() + dur_gt = mel2token_to_dur(mel2token, T).float() + nonpadding = (torch.arange(T).to(dur_pred.device)[None, :] < word_len[:, None]).float() + dur_pred = dur_pred * nonpadding + dur_gt = dur_gt * nonpadding + wdur = F.l1_loss((dur_pred + 1).log(), (dur_gt + 1).log(), reduction='none') + wdur = (wdur * nonpadding).sum() / nonpadding.sum() + if hparams['lambda_word_dur'] > 0: + losses['wdur'] = wdur * hparams['lambda_word_dur'] + if hparams['lambda_sent_dur'] > 0: + sent_dur_p = dur_pred.sum(-1) + sent_dur_g = dur_gt.sum(-1) + sdur_loss = F.l1_loss(sent_dur_p, sent_dur_g, reduction='mean') + losses['sdur'] = sdur_loss.mean() * hparams['lambda_sent_dur'] + + def validation_step(self, sample, batch_idx): + return super().validation_step(sample, batch_idx) + + def save_valid_result(self, sample, batch_idx, model_out): + super(PortaSpeechTask, self).save_valid_result(sample, batch_idx, model_out) + if self.global_step > 0 and hparams['dur_level'] == 'word': + self.logger.add_figure(f'attn_{batch_idx}', spec_to_figure(model_out['attn'][0]), self.global_step) + + def get_attn_stats(self, attn, sample, logging_outputs, prefix=''): + # diagonal_focus_rate + txt_lengths = sample['txt_lengths'].float() + mel_lengths = sample['mel_lengths'].float() + src_padding_mask = sample['txt_tokens'].eq(0) + target_padding_mask = sample['mels'].abs().sum(-1).eq(0) + src_seg_mask = sample['txt_tokens'].eq(self.seg_idx) + attn_ks = txt_lengths.float() / mel_lengths.float() + + focus_rate = get_focus_rate(attn, src_padding_mask, target_padding_mask).mean().data + phone_coverage_rate = get_phone_coverage_rate( + attn, src_padding_mask, src_seg_mask, target_padding_mask).mean() + diagonal_focus_rate, diag_mask = get_diagonal_focus_rate( + attn, attn_ks, mel_lengths, src_padding_mask, target_padding_mask) + logging_outputs[f'{prefix}fr'] = focus_rate.mean().data + logging_outputs[f'{prefix}pcr'] = phone_coverage_rate.mean().data + logging_outputs[f'{prefix}dfr'] = diagonal_focus_rate.mean().data + + def get_plot_dur_info(self, sample, model_out): + if hparams['dur_level'] == 'word': + T_txt = 
sample['word_lengths'].max() + dur_gt = mel2token_to_dur(sample['mel2word'], T_txt)[0] + dur_pred = model_out['dur'] if 'dur' in model_out else dur_gt + txt = sample['ph_words'][0].split(" ") + else: + T_txt = sample['txt_tokens'].shape[1] + dur_gt = mel2token_to_dur(sample['mel2ph'], T_txt)[0] + dur_pred = model_out['dur'] if 'dur' in model_out else dur_gt + txt = self.token_encoder.decode(sample['txt_tokens'][0].cpu().numpy()) + txt = txt.split(" ") + return {'dur_gt': dur_gt, 'dur_pred': dur_pred, 'txt': txt} + + def build_optimizer(self, model): + self.optimizer = torch.optim.AdamW( + self.model.parameters(), + lr=hparams['lr'], + betas=(hparams['optimizer_adam_beta1'], hparams['optimizer_adam_beta2']), + weight_decay=hparams['weight_decay']) + return self.optimizer + + def build_scheduler(self, optimizer): + return FastSpeechTask.build_scheduler(self, optimizer) + + ############ + # infer + ############ + def test_start(self): + super().test_start() + if hparams.get('save_attn', False): + os.makedirs(f'{self.gen_dir}/attn', exist_ok=True) + self.model.store_inverse_all() + + def test_step(self, sample, batch_idx): + assert sample['txt_tokens'].shape[0] == 1, 'only support batch_size=1 in inference' + outputs = self.run_model(sample, infer=True) + text = sample['text'][0] + item_name = sample['item_name'][0] + tokens = sample['txt_tokens'][0].cpu().numpy() + mel_gt = sample['mels'][0].cpu().numpy() + mel_pred = outputs['mel_out'][0].cpu().numpy() + mel2ph = sample['mel2ph'][0].cpu().numpy() + mel2ph_pred = None + str_phs = self.token_encoder.decode(tokens, strip_padding=True) + base_fn = f'[{batch_idx:06d}][{item_name.replace("%", "_")}][%s]' + if text is not None: + base_fn += text.replace(":", "$3A")[:80] + base_fn = base_fn.replace(' ', '_') + gen_dir = self.gen_dir + wav_pred = self.vocoder.spec2wav(mel_pred) + self.saving_result_pool.add_job(self.save_result, args=[ + wav_pred, mel_pred, base_fn % 'P', gen_dir, str_phs, mel2ph_pred]) + if hparams['save_gt']: + wav_gt = self.vocoder.spec2wav(mel_gt) + self.saving_result_pool.add_job(self.save_result, args=[ + wav_gt, mel_gt, base_fn % 'G', gen_dir, str_phs, mel2ph]) + if hparams.get('save_attn', False): + attn = outputs['attn'][0].cpu().numpy() + np.save(f'{gen_dir}/attn/{item_name}.npy', attn) + print(f"Pred_shape: {mel_pred.shape}, gt_shape: {mel_gt.shape}") + return { + 'item_name': item_name, + 'text': text, + 'ph_tokens': self.token_encoder.decode(tokens.tolist()), + 'wav_fn_pred': base_fn % 'P', + 'wav_fn_gt': base_fn % 'G', + } diff --git a/text_to_speech/tasks/tts/ps_adv.py b/text_to_speech/tasks/tts/ps_adv.py new file mode 100644 index 0000000000000000000000000000000000000000..f86409254b8d0d5f00de82cc0a9eed93cc8a40dc --- /dev/null +++ b/text_to_speech/tasks/tts/ps_adv.py @@ -0,0 +1,374 @@ +import os +import torch +import torch.nn.functional as F +import torch.nn as nn +import numpy as np + +from text_to_speech.modules.tts.portaspeech.portaspeech import PortaSpeech +from text_to_speech.modules.tts.syntaspeech.multi_window_disc import Discriminator +from tasks.tts.fs import FastSpeechTask +from text_to_speech.utils.audio.align import mel2token_to_dur +from text_to_speech.utils.commons.hparams import hparams +from text_to_speech.utils.metrics.diagonal_metrics import get_focus_rate, get_phone_coverage_rate, get_diagonal_focus_rate +from text_to_speech.utils.nn.model_utils import num_params +from text_to_speech.utils.commons.tensor_utils import tensors_to_scalars +from text_to_speech.utils.audio.pitch.utils import denorm_f0, 
norm_f0 +from text_to_speech.utils.audio.pitch_extractors import get_pitch +from text_to_speech.utils.metrics.dtw import dtw as DTW + +from text_to_speech.utils.plot.plot import spec_to_figure +from text_to_speech.utils.text.text_encoder import build_token_encoder + + +class PortaSpeechAdvTask(FastSpeechTask): + def __init__(self): + super().__init__() + data_dir = hparams['binary_data_dir'] + self.word_encoder = build_token_encoder(f'{data_dir}/word_set.json') + self.build_disc_model() + self.mse_loss_fn = torch.nn.MSELoss() + + def build_tts_model(self): + ph_dict_size = len(self.token_encoder) + word_dict_size = len(self.word_encoder) + self.model = PortaSpeech(ph_dict_size, word_dict_size, hparams) + + self.gen_params = [p for p in self.model.parameters() if p.requires_grad] + self.dp_params = [p for k, p in self.model.named_parameters() if (('dur_predictor' in k) and p.requires_grad)] + self.gen_params_except_dp = [p for k, p in self.model.named_parameters() if (('dur_predictor' not in k) and p.requires_grad)] + self.bert_params = [p for k, p in self.model.named_parameters() if (('bert' in k) and p.requires_grad)] + self.gen_params_except_bert_and_dp = [p for k, p in self.model.named_parameters() if ('dur_predictor' not in k) and ('bert' not in k) and p.requires_grad ] + + self.use_bert = True if len(self.bert_params) > 0 else False + + def build_disc_model(self): + disc_win_num = hparams['disc_win_num'] + h = hparams['mel_disc_hidden_size'] + self.mel_disc = Discriminator( + time_lengths=[32, 64, 128][:disc_win_num], + freq_length=80, hidden_size=h, kernel=(3, 3) + ) + self.disc_params = list(self.mel_disc.parameters()) + + def on_train_start(self): + super().on_train_start() + for n, m in self.model.named_children(): + num_params(m, model_name=n) + if hasattr(self.model, 'fvae'): + for n, m in self.model.fvae.named_children(): + num_params(m, model_name=f'fvae.{n}') + + def _training_step(self, sample, batch_idx, optimizer_idx): + loss_output = {} + loss_weights = {} + disc_start = self.global_step >= hparams["disc_start_steps"] and hparams['lambda_mel_adv'] > 0 + if optimizer_idx == 0: + ####################### + # Generator # + ####################### + loss_output, model_out = self.run_model(sample, infer=False) + self.model_out_gt = self.model_out = \ + {k: v.detach() for k, v in model_out.items() if isinstance(v, torch.Tensor)} + if disc_start: + mel_p = model_out['mel_out'] + if hasattr(self.model, 'out2mel'): + mel_p = self.model.out2mel(mel_p) + o_ = self.mel_disc(mel_p) + p_, pc_ = o_['y'], o_['y_c'] + if p_ is not None: + loss_output['a'] = self.mse_loss_fn(p_, p_.new_ones(p_.size())) + loss_weights['a'] = hparams['lambda_mel_adv'] + if pc_ is not None: + loss_output['ac'] = self.mse_loss_fn(pc_, pc_.new_ones(pc_.size())) + loss_weights['ac'] = hparams['lambda_mel_adv'] + else: + ####################### + # Discriminator # + ####################### + if disc_start and self.global_step % hparams['disc_interval'] == 0: + model_out = self.model_out_gt + mel_g = sample['mels'] + mel_p = model_out['mel_out'] + o = self.mel_disc(mel_g) + p, pc = o['y'], o['y_c'] + o_ = self.mel_disc(mel_p) + p_, pc_ = o_['y'], o_['y_c'] + if p_ is not None: + loss_output["r"] = self.mse_loss_fn(p, p.new_ones(p.size())) + loss_output["f"] = self.mse_loss_fn(p_, p_.new_zeros(p_.size())) + if pc_ is not None: + loss_output["rc"] = self.mse_loss_fn(pc, pc.new_ones(pc.size())) + loss_output["fc"] = self.mse_loss_fn(pc_, pc_.new_zeros(pc_.size())) + total_loss = sum([loss_weights.get(k, 1) * v for k, 
v in loss_output.items() if isinstance(v, torch.Tensor) and v.requires_grad]) + loss_output['batch_size'] = sample['txt_tokens'].size()[0] + return total_loss, loss_output + + def run_model(self, sample, infer=False, *args, **kwargs): + txt_tokens = sample['txt_tokens'] + word_tokens = sample['word_tokens'] + spk_embed = sample.get('spk_embed') + spk_id = sample.get('spk_ids') + if not infer: + output = self.model(txt_tokens, word_tokens, + ph2word=sample['ph2word'], + mel2word=sample['mel2word'], + mel2ph=sample['mel2ph'], + word_len=sample['word_lengths'].max(), + tgt_mels=sample['mels'], + pitch=sample.get('pitch'), + spk_embed=spk_embed, + spk_id=spk_id, + infer=False, + global_step=self.global_step, + graph_lst=sample['graph_lst'], + etypes_lst=sample['etypes_lst'], + bert_feats=sample.get("bert_feats"), + cl_feats=sample.get("cl_feats") + ) + losses = {} + losses['kl_v'] = output['kl'].detach() + losses_kl = output['kl'] + losses_kl = torch.clamp(losses_kl, min=hparams['kl_min']) + losses_kl = min(self.global_step / hparams['kl_start_steps'], 1) * losses_kl + losses_kl = losses_kl * hparams['lambda_kl'] + losses['kl'] = losses_kl + + self.add_mel_loss(output['mel_out'], sample['mels'], losses) + if hparams['dur_level'] == 'word': + self.add_dur_loss( + output['dur'], sample['mel2word'], sample['word_lengths'], sample['txt_tokens'], losses) + self.get_attn_stats(output['attn'], sample, losses) + else: + super(PortaSpeechAdvTask, self).add_dur_loss(output['dur'], sample['mel2ph'], sample['txt_tokens'], losses) + return losses, output + else: + use_gt_dur = kwargs.get('infer_use_gt_dur', hparams['use_gt_dur']) + output = self.model( + txt_tokens, word_tokens, + ph2word=sample['ph2word'], + word_len=sample['word_lengths'].max(), + pitch=sample.get('pitch'), + mel2ph=sample['mel2ph'] if use_gt_dur else None, + mel2word=sample['mel2word'] if use_gt_dur else None, + tgt_mels=sample['mels'], + infer=True, + spk_embed=spk_embed, + spk_id=spk_id, + graph_lst=sample['graph_lst'], + etypes_lst=sample['etypes_lst'], + bert_feats=sample.get("bert_feats"), + cl_feats=sample.get("cl_feats") + ) + return output + + def add_dur_loss(self, dur_pred, mel2token, word_len, txt_tokens, losses=None): + T = word_len.max() + dur_gt = mel2token_to_dur(mel2token, T).float() + nonpadding = (torch.arange(T).to(dur_pred.device)[None, :] < word_len[:, None]).float() + dur_pred = dur_pred * nonpadding + dur_gt = dur_gt * nonpadding + wdur = F.l1_loss((dur_pred + 1).log(), (dur_gt + 1).log(), reduction='none') + wdur = (wdur * nonpadding).sum() / nonpadding.sum() + + if hparams['lambda_word_dur'] > 0: + losses['wdur'] = wdur * hparams['lambda_word_dur'] + if hparams['lambda_sent_dur'] > 0: + sent_dur_p = dur_pred.sum(-1) + sent_dur_g = dur_gt.sum(-1) + sdur_loss = F.l1_loss(sent_dur_p, sent_dur_g, reduction='mean') + losses['sdur'] = sdur_loss.mean() * hparams['lambda_sent_dur'] + + with torch.no_grad(): + # calculate word-level abs_dur_error in micro-second + abs_word_dur_error = F.l1_loss(dur_pred , dur_gt, reduction='none') + abs_word_dur_error = (abs_word_dur_error * nonpadding).sum() / nonpadding.sum() + abs_word_dur_error = abs_word_dur_error * hparams['hop_size'] / hparams['audio_sample_rate'] * 1000 + losses['abs_word_dur_error'] = abs_word_dur_error + # calculate word-level abs_dur_error in second + sent_dur_p = dur_pred.sum(-1) + sent_dur_g = dur_gt.sum(-1) + abs_sent_dur_error = F.l1_loss(sent_dur_p, sent_dur_g, reduction='mean').mean() + abs_sent_dur_error = abs_sent_dur_error * hparams['hop_size'] / 
hparams['audio_sample_rate'] + losses['abs_sent_dur_error'] = abs_sent_dur_error + + def validation_step(self, sample, batch_idx): + outputs = {} + outputs['losses'] = {} + outputs['losses'], model_out = self.run_model(sample) + outputs['total_loss'] = sum(outputs['losses'].values()) + outputs['nsamples'] = sample['nsamples'] + outputs = tensors_to_scalars(outputs) + if self.global_step % hparams['valid_infer_interval'] == 0 \ + and batch_idx < hparams['num_valid_plots']: + valid_results = self.save_valid_result(sample, batch_idx, model_out) + wav_gt = valid_results['wav_gt'] + mel_gt = valid_results['mel_gt'] + wav_pred = valid_results['wav_pred'] + mel_pred = valid_results['mel_pred'] + f0_pred_, _ = get_pitch(wav_pred, mel_pred, hparams) + f0_gt_, _ = get_pitch(wav_gt, mel_gt, hparams) + manhattan_distance = lambda x, y: np.abs(x - y) + dist, cost, acc, path = DTW(f0_pred_, f0_gt_, manhattan_distance) + outputs['losses']['f0_dtw'] = dist / len(f0_gt_) + return outputs + + def save_valid_result(self, sample, batch_idx, model_out): + sr = hparams['audio_sample_rate'] + f0_gt = None + mel_out = model_out['mel_out'] + if sample.get('f0') is not None: + f0_gt = denorm_f0(sample['f0'][0].cpu(), sample['uv'][0].cpu()) + self.plot_mel(batch_idx, sample['mels'], mel_out, f0s=f0_gt) + + # if self.global_step > 0: + wav_pred = self.vocoder.spec2wav(mel_out[0].cpu(), f0=f0_gt) + self.logger.add_audio(f'wav_val_{batch_idx}', wav_pred, self.global_step, sr) + # with gt duration + model_out = self.run_model(sample, infer=True, infer_use_gt_dur=True) + dur_info = self.get_plot_dur_info(sample, model_out) + del dur_info['dur_pred'] + wav_pred = self.vocoder.spec2wav(model_out['mel_out'][0].cpu(), f0=f0_gt) + self.logger.add_audio(f'wav_gdur_{batch_idx}', wav_pred, self.global_step, sr) + self.plot_mel(batch_idx, sample['mels'], model_out['mel_out'][0], f'mel_gdur_{batch_idx}', + dur_info=dur_info, f0s=f0_gt) + + # with pred duration + if not hparams['use_gt_dur']: + model_out = self.run_model(sample, infer=True, infer_use_gt_dur=False) + dur_info = self.get_plot_dur_info(sample, model_out) + self.plot_mel(batch_idx, sample['mels'], model_out['mel_out'][0], f'mel_pdur_{batch_idx}', + dur_info=dur_info, f0s=f0_gt) + wav_pred = self.vocoder.spec2wav(model_out['mel_out'][0].cpu(), f0=f0_gt) + self.logger.add_audio(f'wav_pdur_{batch_idx}', wav_pred, self.global_step, sr) + # gt wav + mel_gt = sample['mels'][0].cpu() + wav_gt = self.vocoder.spec2wav(mel_gt, f0=f0_gt) + if self.global_step <= hparams['valid_infer_interval']: + self.logger.add_audio(f'wav_gt_{batch_idx}', wav_gt, self.global_step, sr) + + # add attn plot + if self.global_step > 0 and hparams['dur_level'] == 'word': + self.logger.add_figure(f'attn_{batch_idx}', spec_to_figure(model_out['attn'][0]), self.global_step) + + return {'wav_gt': wav_gt, 'wav_pred': wav_pred, 'mel_gt': mel_gt, 'mel_pred': model_out['mel_out'][0].cpu()} + + def get_attn_stats(self, attn, sample, logging_outputs, prefix=''): + # diagonal_focus_rate + txt_lengths = sample['txt_lengths'].float() + mel_lengths = sample['mel_lengths'].float() + src_padding_mask = sample['txt_tokens'].eq(0) + target_padding_mask = sample['mels'].abs().sum(-1).eq(0) + src_seg_mask = sample['txt_tokens'].eq(self.seg_idx) + attn_ks = txt_lengths.float() / mel_lengths.float() + + focus_rate = get_focus_rate(attn, src_padding_mask, target_padding_mask).mean().data + phone_coverage_rate = get_phone_coverage_rate( + attn, src_padding_mask, src_seg_mask, target_padding_mask).mean() + 
diagonal_focus_rate, diag_mask = get_diagonal_focus_rate( + attn, attn_ks, mel_lengths, src_padding_mask, target_padding_mask) + logging_outputs[f'{prefix}fr'] = focus_rate.mean().data + logging_outputs[f'{prefix}pcr'] = phone_coverage_rate.mean().data + logging_outputs[f'{prefix}dfr'] = diagonal_focus_rate.mean().data + + def get_plot_dur_info(self, sample, model_out): + if hparams['dur_level'] == 'word': + T_txt = sample['word_lengths'].max() + dur_gt = mel2token_to_dur(sample['mel2word'], T_txt)[0] + dur_pred = model_out['dur'] if 'dur' in model_out else dur_gt + txt = sample['ph_words'][0].split(" ") + else: + T_txt = sample['txt_tokens'].shape[1] + dur_gt = mel2token_to_dur(sample['mel2ph'], T_txt)[0] + dur_pred = model_out['dur'] if 'dur' in model_out else dur_gt + txt = self.token_encoder.decode(sample['txt_tokens'][0].cpu().numpy()) + txt = txt.split(" ") + return {'dur_gt': dur_gt, 'dur_pred': dur_pred, 'txt': txt} + + def build_optimizer(self, model): + + optimizer_gen = torch.optim.AdamW( + self.gen_params, + lr=hparams['lr'], + betas=(hparams['optimizer_adam_beta1'], hparams['optimizer_adam_beta2']), + weight_decay=hparams['weight_decay']) + + optimizer_disc = torch.optim.AdamW( + self.disc_params, + lr=hparams['disc_lr'], + betas=(hparams['optimizer_adam_beta1'], hparams['optimizer_adam_beta2']), + **hparams["discriminator_optimizer_params"]) if len(self.disc_params) > 0 else None + + return [optimizer_gen, optimizer_disc] + + def build_scheduler(self, optimizer): + return [ + FastSpeechTask.build_scheduler(self, optimizer[0]), # Generator Scheduler + torch.optim.lr_scheduler.StepLR(optimizer=optimizer[1], # Discriminator Scheduler + **hparams["discriminator_scheduler_params"]), + ] + + def on_before_optimization(self, opt_idx): + if opt_idx == 0: + nn.utils.clip_grad_norm_(self.dp_params, hparams['clip_grad_norm']) + if self.use_bert: + nn.utils.clip_grad_norm_(self.bert_params, hparams['clip_grad_norm']) + nn.utils.clip_grad_norm_(self.gen_params_except_bert_and_dp, hparams['clip_grad_norm']) + else: + nn.utils.clip_grad_norm_(self.gen_params_except_dp, hparams['clip_grad_norm']) + else: + nn.utils.clip_grad_norm_(self.disc_params, hparams["clip_grad_norm"]) + + def on_after_optimization(self, epoch, batch_idx, optimizer, optimizer_idx): + if self.scheduler is not None: + self.scheduler[0].step(self.global_step // hparams['accumulate_grad_batches']) + self.scheduler[1].step(self.global_step // hparams['accumulate_grad_batches']) + + ############ + # infer + ############ + def test_start(self): + super().test_start() + if hparams.get('save_attn', False): + os.makedirs(f'{self.gen_dir}/attn', exist_ok=True) + self.model.store_inverse_all() + + def test_step(self, sample, batch_idx): + assert sample['txt_tokens'].shape[0] == 1, 'only support batch_size=1 in inference' + outputs = self.run_model(sample, infer=True) + text = sample['text'][0] + item_name = sample['item_name'][0] + tokens = sample['txt_tokens'][0].cpu().numpy() + mel_gt = sample['mels'][0].cpu().numpy() + mel_pred = outputs['mel_out'][0].cpu().numpy() + mel2ph = sample['mel2ph'][0].cpu().numpy() + mel2ph_pred = None + str_phs = self.token_encoder.decode(tokens, strip_padding=True) + base_fn = f'[{batch_idx:06d}][{item_name.replace("%", "_")}][%s]' + if text is not None: + base_fn += text.replace(":", "$3A")[:80] + base_fn = base_fn.replace(' ', '_') + gen_dir = self.gen_dir + wav_pred = self.vocoder.spec2wav(mel_pred) + self.saving_result_pool.add_job(self.save_result, args=[ + wav_pred, mel_pred, base_fn % 'P', 
gen_dir, str_phs, mel2ph_pred]) + if hparams['save_gt']: + wav_gt = self.vocoder.spec2wav(mel_gt) + self.saving_result_pool.add_job(self.save_result, args=[ + wav_gt, mel_gt, base_fn % 'G', gen_dir, str_phs, mel2ph]) + if hparams.get('save_attn', False): + attn = outputs['attn'][0].cpu().numpy() + np.save(f'{gen_dir}/attn/{item_name}.npy', attn) + # save f0 for pitch dtw + f0_pred_, _ = get_pitch(wav_pred, mel_pred, hparams) + f0_gt_, _ = get_pitch(wav_gt, mel_gt, hparams) + np.save(f'{gen_dir}/f0/{item_name}.npy', f0_pred_) + np.save(f'{gen_dir}/f0/{item_name}_gt.npy', f0_gt_) + + print(f"Pred_shape: {mel_pred.shape}, gt_shape: {mel_gt.shape}") + return { + 'item_name': item_name, + 'text': text, + 'ph_tokens': self.token_encoder.decode(tokens.tolist()), + 'wav_fn_pred': base_fn % 'P', + 'wav_fn_gt': base_fn % 'G', + } diff --git a/text_to_speech/tasks/tts/ps_adv_mlm.py b/text_to_speech/tasks/tts/ps_adv_mlm.py new file mode 100644 index 0000000000000000000000000000000000000000..24da9ef08b848130d9855a276e316425ddd10bc8 --- /dev/null +++ b/text_to_speech/tasks/tts/ps_adv_mlm.py @@ -0,0 +1,233 @@ +import torch +from torch import nn +from tasks.tts.ps_adv import PortaSpeechAdvTask, FastSpeechTask +from text_to_speech.utils.commons.hparams import hparams +from text_to_speech.utils.nn.seq_utils import group_hidden_by_segs + + +class PortaSpeechAdvMLMTask(PortaSpeechAdvTask): + + def build_scheduler(self, optimizer): + return [ + FastSpeechTask.build_scheduler(self, optimizer[0]), # Generator Scheduler + torch.optim.lr_scheduler.StepLR(optimizer=optimizer[1], # Discriminator Scheduler + **hparams["discriminator_scheduler_params"]), + ] + + def on_before_optimization(self, opt_idx): + if opt_idx in [0, 2]: + nn.utils.clip_grad_norm_(self.dp_params, hparams['clip_grad_norm']) + if self.use_bert: + nn.utils.clip_grad_norm_(self.bert_params, hparams['clip_grad_norm']) + nn.utils.clip_grad_norm_(self.gen_params_except_bert_and_dp, hparams['clip_grad_norm']) + else: + nn.utils.clip_grad_norm_(self.gen_params_except_dp, hparams['clip_grad_norm']) + else: + nn.utils.clip_grad_norm_(self.disc_params, hparams["clip_grad_norm"]) + + def on_after_optimization(self, epoch, batch_idx, optimizer, optimizer_idx): + if self.scheduler is not None: + self.scheduler[0].step(self.global_step // hparams['accumulate_grad_batches']) + self.scheduler[1].step(self.global_step // hparams['accumulate_grad_batches']) + + + def _training_step(self, sample, batch_idx, optimizer_idx): + loss_output = {} + loss_weights = {} + disc_start = self.global_step >= hparams["disc_start_steps"] and hparams['lambda_mel_adv'] > 0 + if optimizer_idx == 0: + ####################### + # Generator # + ####################### + loss_output, model_out = self.run_model(sample, infer=False) + self.model_out_gt = self.model_out = \ + {k: v.detach() for k, v in model_out.items() if isinstance(v, torch.Tensor)} + if disc_start: + mel_p = model_out['mel_out'] + if hasattr(self.model, 'out2mel'): + mel_p = self.model.out2mel(mel_p) + o_ = self.mel_disc(mel_p) + p_, pc_ = o_['y'], o_['y_c'] + if p_ is not None: + loss_output['a'] = self.mse_loss_fn(p_, p_.new_ones(p_.size())) + loss_weights['a'] = hparams['lambda_mel_adv'] + if pc_ is not None: + loss_output['ac'] = self.mse_loss_fn(pc_, pc_.new_ones(pc_.size())) + loss_weights['ac'] = hparams['lambda_mel_adv'] + else: + return None + + loss_output2, model_out2 = self.run_contrastive_learning(sample) + loss_output.update(loss_output2) + model_out.update(model_out2) + + elif optimizer_idx == 1: + 
####################### + # Discriminator # + ####################### + if disc_start and self.global_step % hparams['disc_interval'] == 0: + model_out = self.model_out_gt + mel_g = sample['mels'] + mel_p = model_out['mel_out'] + o = self.mel_disc(mel_g) + p, pc = o['y'], o['y_c'] + o_ = self.mel_disc(mel_p) + p_, pc_ = o_['y'], o_['y_c'] + if p_ is not None: + loss_output["r"] = self.mse_loss_fn(p, p.new_ones(p.size())) + loss_output["f"] = self.mse_loss_fn(p_, p_.new_zeros(p_.size())) + if pc_ is not None: + loss_output["rc"] = self.mse_loss_fn(pc, pc.new_ones(pc.size())) + loss_output["fc"] = self.mse_loss_fn(pc_, pc_.new_zeros(pc_.size())) + + total_loss = sum([loss_weights.get(k, 1) * v for k, v in loss_output.items() if isinstance(v, torch.Tensor) and v.requires_grad]) + loss_output['batch_size'] = sample['txt_tokens'].size()[0] + return total_loss, loss_output + + def run_contrastive_learning(self, sample): + losses = {} + outputs = {} + + bert = self.model.encoder.bert.bert + bert_for_mlm = self.model.encoder.bert + pooler = self.model.encoder.pooler + sim = self.model.encoder.sim + tokenizer = self.model.encoder.tokenizer + ph_encoder = self.model.encoder + + if hparams['lambda_cl'] > 0: + if hparams.get("cl_version", "v1") == "v1": + cl_feats = sample['cl_feats'] + bs, _, t = cl_feats['cl_input_ids'].shape + cl_input_ids = cl_feats['cl_input_ids'].reshape([bs*2, t]) + cl_attention_mask = cl_feats['cl_attention_mask'].reshape([bs*2, t]) + cl_token_type_ids = cl_feats['cl_token_type_ids'].reshape([bs*2, t]) + cl_output = bert(cl_input_ids, attention_mask=cl_attention_mask,token_type_ids=cl_token_type_ids,) + pooler_output = pooler(cl_attention_mask, cl_output) + pooler_output = pooler_output.reshape([bs, 2, -1]) + z1, z2 = pooler_output[:,0], pooler_output[:,1] + + cos_sim = sim(z1.unsqueeze(1), z2.unsqueeze(0)) + labels = torch.arange(cos_sim.size(0)).long().to(z1.device) + ce_fn = nn.CrossEntropyLoss() + cl_loss = ce_fn(cos_sim, labels) + losses['cl_v'] = cl_loss.detach() + losses['cl'] = cl_loss * hparams['lambda_cl'] + elif hparams['cl_version'] == "v2": + # use the output of ph encoder as sentence embedding + cl_feats = sample['cl_feats'] + bs, _, t = cl_feats['cl_input_ids'].shape + cl_input_ids = cl_feats['cl_input_ids'].reshape([bs*2, t]) + cl_attention_mask = cl_feats['cl_attention_mask'].reshape([bs*2, t]) + cl_token_type_ids = cl_feats['cl_token_type_ids'].reshape([bs*2, t]) + txt_tokens = sample['txt_tokens'] + bert_feats = sample['bert_feats'] + src_nonpadding = (txt_tokens > 0).float()[:, :, None] + ph_encoder_out1 = ph_encoder(txt_tokens, bert_feats=bert_feats, ph2word=sample['ph2word']) * src_nonpadding + ph_encoder_out2 = ph_encoder(txt_tokens, bert_feats=bert_feats, ph2word=sample['ph2word']) * src_nonpadding + # word_encoding1 = group_hidden_by_segs(ph_encoder_out1, sample['ph2word'], sample['ph2word'].max().item()) + # word_encoding2 = group_hidden_by_segs(ph_encoder_out2, sample['ph2word'], sample['ph2word'].max().item()) + z1 = ((ph_encoder_out1 * src_nonpadding).sum(1) / src_nonpadding.sum(1)) + z2 = ((ph_encoder_out2 * src_nonpadding).sum(1) / src_nonpadding.sum(1)) + + cos_sim = sim(z1.unsqueeze(1), z2.unsqueeze(0)) + labels = torch.arange(cos_sim.size(0)).long().to(z1.device) + ce_fn = nn.CrossEntropyLoss() + cl_loss = ce_fn(cos_sim, labels) + losses['cl_v'] = cl_loss.detach() + losses['cl'] = cl_loss * hparams['lambda_cl'] + elif hparams['cl_version'] == "v3": + # use the word-level contrastive learning + cl_feats = sample['cl_feats'] + bs, _, t = 
cl_feats['cl_input_ids'].shape + cl_input_ids = cl_feats['cl_input_ids'].reshape([bs*2, t]) + cl_attention_mask = cl_feats['cl_attention_mask'].reshape([bs*2, t]) + cl_token_type_ids = cl_feats['cl_token_type_ids'].reshape([bs*2, t]) + cl_output = bert(cl_input_ids, attention_mask=cl_attention_mask,token_type_ids=cl_token_type_ids,) + cl_output = cl_output.last_hidden_state.reshape([-1, 768]) # [bs*2,t_w,768] ==> [bs*2*t_w, 768] + cl_word_out = cl_output[cl_attention_mask.reshape([-1]).bool()] # [num_word*2, 768] + cl_word_out = cl_word_out.view([-1, 2, 768]) + z1_total, z2_total = cl_word_out[:,0], cl_word_out[:,1] # [num_word, 768] + ce_fn = nn.CrossEntropyLoss() + start_idx = 0 + lengths = cl_attention_mask.sum(-1) + cl_loss_accu = 0 + for i in range(bs): + length = lengths[i] + z1 = z1_total[start_idx:start_idx + length] + z2 = z2_total[start_idx:start_idx + length] + start_idx += length + cos_sim = sim(z1.unsqueeze(1), z2.unsqueeze(0)) + labels = torch.arange(cos_sim.size(0)).long().to(z1.device) + cl_loss_accu += ce_fn(cos_sim, labels) * length + cl_loss = cl_loss_accu / lengths.sum() + losses['cl_v'] = cl_loss.detach() + losses['cl'] = cl_loss * hparams['lambda_cl'] + elif hparams['cl_version'] == "v4": + # with Wiki dataset + cl_feats = sample['cl_feats'] + bs, _, t = cl_feats['cl_input_ids'].shape + cl_input_ids = cl_feats['cl_input_ids'].reshape([bs*2, t]) + cl_attention_mask = cl_feats['cl_attention_mask'].reshape([bs*2, t]) + cl_token_type_ids = cl_feats['cl_token_type_ids'].reshape([bs*2, t]) + cl_output = bert(cl_input_ids, attention_mask=cl_attention_mask,token_type_ids=cl_token_type_ids,) + pooler_output = pooler(cl_attention_mask, cl_output) + pooler_output = pooler_output.reshape([bs, 2, -1]) + z1, z2 = pooler_output[:,0], pooler_output[:,1] + + cos_sim = sim(z1.unsqueeze(1), z2.unsqueeze(0)) + labels = torch.arange(cos_sim.size(0)).long().to(z1.device) + ce_fn = nn.CrossEntropyLoss() + cl_loss = ce_fn(cos_sim, labels) + losses['cl_v'] = cl_loss.detach() + losses['cl'] = cl_loss * hparams['lambda_cl'] + elif hparams['cl_version'] == "v5": + # with NLI dataset + cl_feats = sample['cl_feats'] + cl_input_ids = cl_feats['sent0']['cl_input_ids'] + cl_attention_mask = cl_feats['sent0']['cl_attention_mask'] + cl_token_type_ids = cl_feats['sent0']['cl_token_type_ids'] + cl_output = bert(cl_input_ids, attention_mask=cl_attention_mask,token_type_ids=cl_token_type_ids,) + z1 = pooler_output_sent0 = pooler(cl_attention_mask, cl_output) + + cl_input_ids = cl_feats['sent1']['cl_input_ids'] + cl_attention_mask = cl_feats['sent1']['cl_attention_mask'] + cl_token_type_ids = cl_feats['sent1']['cl_token_type_ids'] + cl_output = bert(cl_input_ids, attention_mask=cl_attention_mask,token_type_ids=cl_token_type_ids,) + z2 = pooler_output_sent1 = pooler(cl_attention_mask, cl_output) + + cl_input_ids = cl_feats['hard_neg']['cl_input_ids'] + cl_attention_mask = cl_feats['hard_neg']['cl_attention_mask'] + cl_token_type_ids = cl_feats['hard_neg']['cl_token_type_ids'] + cl_output = bert(cl_input_ids, attention_mask=cl_attention_mask,token_type_ids=cl_token_type_ids,) + z3 = pooler_output_neg = pooler(cl_attention_mask, cl_output) + + cos_sim = sim(z1.unsqueeze(1), z2.unsqueeze(0)) + z1_z3_cos = sim(z1.unsqueeze(1), z3.unsqueeze(0)) + cos_sim = torch.cat([cos_sim, z1_z3_cos], 1) # [n_sent, n_sent * 2] + labels = torch.arange(cos_sim.size(0)).long().to(cos_sim.device) # [n_sent, ] + ce_fn = nn.CrossEntropyLoss() + cl_loss = ce_fn(cos_sim, labels) + losses['cl_v'] = cl_loss.detach() + losses['cl'] = 
cl_loss * hparams['lambda_cl'] + else: + raise NotImplementedError() + + if hparams['lambda_mlm'] > 0: + cl_feats = sample['cl_feats'] + mlm_input_ids = cl_feats['mlm_input_ids'] + bs, t = mlm_input_ids.shape + mlm_input_ids = mlm_input_ids.view((-1, mlm_input_ids.size(-1))) + mlm_labels = cl_feats['mlm_labels'] + mlm_labels = mlm_labels.view(-1, mlm_labels.size(-1)) + mlm_attention_mask = cl_feats['mlm_attention_mask'] + + prediction_scores = bert_for_mlm(mlm_input_ids, mlm_attention_mask).logits + ce_fn = nn.CrossEntropyLoss(reduction="none") + mlm_loss = ce_fn(prediction_scores.view(-1, tokenizer.vocab_size), mlm_labels.view(-1)) + mlm_loss = mlm_loss[mlm_labels.view(-1)>=0].mean() + losses['mlm'] = mlm_loss * hparams['lambda_mlm'] + losses['mlm_v'] = mlm_loss.detach() + + return losses, outputs + \ No newline at end of file diff --git a/text_to_speech/tasks/tts/ps_adv_mlm_history.py b/text_to_speech/tasks/tts/ps_adv_mlm_history.py new file mode 100644 index 0000000000000000000000000000000000000000..b61a1b2349a34f504ae59aabb3430cc4eb703fbe --- /dev/null +++ b/text_to_speech/tasks/tts/ps_adv_mlm_history.py @@ -0,0 +1,171 @@ +import torch +from torch import nn +from tasks.tts.ps_adv import PortaSpeechAdvTask, FastSpeechTask +from text_to_speech.utils.commons.hparams import hparams + + +class PortaSpeechAdvMLMTask(PortaSpeechAdvTask): + + def build_optimizer(self, model): + optimizer_gen = torch.optim.AdamW( + self.model.parameters(), + lr=hparams['lr'], + betas=(hparams['optimizer_adam_beta1'], hparams['optimizer_adam_beta2']), + weight_decay=hparams['weight_decay']) + + optimizer_disc = torch.optim.AdamW( + self.disc_params, + lr=hparams['disc_lr'], + betas=(hparams['optimizer_adam_beta1'], hparams['optimizer_adam_beta2']), + **hparams["discriminator_optimizer_params"]) if len(self.disc_params) > 0 else None + + optimizer_encoder = torch.optim.AdamW( + self.model.encoder.parameters(), + lr=hparams['lr'], + betas=(hparams['optimizer_adam_beta1'], hparams['optimizer_adam_beta2']), + weight_decay=hparams['weight_decay']) + return [optimizer_gen, optimizer_disc, optimizer_encoder] + + def build_scheduler(self, optimizer): + return [ + FastSpeechTask.build_scheduler(self, optimizer[0]), # Generator Scheduler + torch.optim.lr_scheduler.StepLR(optimizer=optimizer[1], # Discriminator Scheduler + **hparams["discriminator_scheduler_params"]), + FastSpeechTask.build_scheduler(self, optimizer[2]), # Generator Scheduler + ] + + def on_before_optimization(self, opt_idx): + if opt_idx in [0,2]: + nn.utils.clip_grad_norm_(self.dp_params, hparams['clip_grad_norm']) + if self.use_graph_encoder: + nn.utils.clip_grad_norm_(self.gen_params_except_gae_and_dp, hparams['clip_grad_norm']) + nn.utils.clip_grad_norm_(self.gae_params, hparams['clip_grad_norm']) + elif self.use_bert: + nn.utils.clip_grad_norm_(self.gen_params_except_bert_and_dp, hparams['clip_grad_norm']) + nn.utils.clip_grad_norm_(self.bert_params, hparams['clip_grad_norm']) + else: + nn.utils.clip_grad_norm_(self.gen_params_except_dp, hparams['clip_grad_norm']) + else: + nn.utils.clip_grad_norm_(self.disc_params, hparams["clip_grad_norm"]) + + def on_after_optimization(self, epoch, batch_idx, optimizer, optimizer_idx): + if self.scheduler is not None: + self.scheduler[0].step(self.global_step // hparams['accumulate_grad_batches']) + self.scheduler[1].step(self.global_step // hparams['accumulate_grad_batches']) + self.scheduler[2].step(self.global_step // hparams['accumulate_grad_batches']) + + def on_after_optimization(self, epoch, batch_idx, 
optimizer, optimizer_idx): + if self.scheduler is not None: + self.scheduler[0].step(self.global_step // hparams['accumulate_grad_batches']) + self.scheduler[1].step(self.global_step // hparams['accumulate_grad_batches']) + self.scheduler[2].step(self.global_step // hparams['accumulate_grad_batches']) + + def _training_step(self, sample, batch_idx, optimizer_idx): + loss_output = {} + loss_weights = {} + disc_start = self.global_step >= hparams["disc_start_steps"] and hparams['lambda_mel_adv'] > 0 + if optimizer_idx == 0: + ####################### + # Generator # + ####################### + loss_output, model_out = self.run_model(sample, infer=False) + self.model_out_gt = self.model_out = \ + {k: v.detach() for k, v in model_out.items() if isinstance(v, torch.Tensor)} + if disc_start: + mel_p = model_out['mel_out'] + if hasattr(self.model, 'out2mel'): + mel_p = self.model.out2mel(mel_p) + o_ = self.mel_disc(mel_p) + p_, pc_ = o_['y'], o_['y_c'] + if p_ is not None: + loss_output['a'] = self.mse_loss_fn(p_, p_.new_ones(p_.size())) + loss_weights['a'] = hparams['lambda_mel_adv'] + if pc_ is not None: + loss_output['ac'] = self.mse_loss_fn(pc_, pc_.new_ones(pc_.size())) + loss_weights['ac'] = hparams['lambda_mel_adv'] + elif optimizer_idx == 1: + ####################### + # Discriminator # + ####################### + if disc_start and self.global_step % hparams['disc_interval'] == 0: + model_out = self.model_out_gt + mel_g = sample['mels'] + mel_p = model_out['mel_out'] + o = self.mel_disc(mel_g) + p, pc = o['y'], o['y_c'] + o_ = self.mel_disc(mel_p) + p_, pc_ = o_['y'], o_['y_c'] + if p_ is not None: + loss_output["r"] = self.mse_loss_fn(p, p.new_ones(p.size())) + loss_output["f"] = self.mse_loss_fn(p_, p_.new_zeros(p_.size())) + if pc_ is not None: + loss_output["rc"] = self.mse_loss_fn(pc, pc.new_ones(pc.size())) + loss_output["fc"] = self.mse_loss_fn(pc_, pc_.new_zeros(pc_.size())) + else: + loss_output, model_out = self.run_contrastive_learning(sample) + + total_loss = sum([loss_weights.get(k, 1) * v for k, v in loss_output.items() if isinstance(v, torch.Tensor) and v.requires_grad]) + loss_output['batch_size'] = sample['txt_tokens'].size()[0] + return total_loss, loss_output + + def run_contrastive_learning(self, sample): + losses = {} + outputs = {} + + bert = self.model.encoder.bert + pooler = self.model.encoder.pooler + sim = self.model.encoder.sim + # electra_gen = self.model.encoder.electra_gen + # electra_disc = self.model.encoder.electra_disc + # electra_head = self.model.encoder.electra_head + + cl_feats = sample['cl_feats'] + bs, _, t = cl_feats['cl_input_ids'].shape + cl_input_ids = cl_feats['cl_input_ids'].reshape([bs*2, t]) + cl_attention_mask = cl_feats['cl_attention_mask'].reshape([bs*2, t]) + cl_token_type_ids = cl_feats['cl_token_type_ids'].reshape([bs*2, t]) + cl_output = bert(cl_input_ids, attention_mask=cl_attention_mask,token_type_ids=cl_token_type_ids,) + pooler_output = pooler(cl_attention_mask, cl_output) + pooler_output = pooler_output.reshape([bs, 2, -1]) + z1, z2 = pooler_output[:,0], pooler_output[:,1] + + cos_sim = sim(z1.unsqueeze(1), z2.unsqueeze(0)) + labels = torch.arange(cos_sim.size(0)).long().to(z1.device) + ce_fn = nn.CrossEntropyLoss() + cl_loss = ce_fn(cos_sim, labels) + losses['cl_v'] = cl_loss.detach() + losses['cl'] = cl_loss * hparams['lambda_mlm'] + + # mlm_input_ids = cl_feats['mlm_input_ids'] + # mlm_input_ids = mlm_input_ids.view((-1, mlm_input_ids.size(-1))) + # with torch.no_grad(): + # g_pred = electra_gen(mlm_input_ids, 
cl_attention_mask)[0].argmax(-1) + # g_pred[:, 0] = 101 # CLS token + # replaced = (g_pred != cl_input_ids) * cl_attention_mask + # e_inputs = g_pred * cl_attention_mask + # mlm_outputs = electra_disc( + # e_inputs, + # attention_mask=cl_attention_mask, + # token_type_ids=cl_token_type_ids, + # position_ids=None, + # head_mask=None, + # inputs_embeds=None, + # output_attentions=None, + # output_hidden_states=False, # True if cls.model_args.pooler_type in ['avg_top2', 'avg_first_last'] else False, + # return_dict=True, + # cls_input=pooler_output.view((-1, pooler_output.size(-1))), + # ) + # e_labels = replaced.view(-1, replaced.size(-1)) + # prediction_scores = electra_head(mlm_outputs.last_hidden_state) + # # rep = (e_labels == 1) * cl_attention_mask + # # fix = (e_labels == 0) * cl_attention_mask + # # prediction = prediction_scores.argmax(-1) + # # self.electra_rep_acc = float((prediction*rep).sum()/rep.sum()) + # # self.electra_fix_acc = float(1.0 - (prediction*fix).sum()/fix.sum()) + # # self.electra_acc = float(((prediction == e_labels) * cl_attention_mask).sum()/cl_attention_mask.sum()) + # masked_lm_loss = ce_fn(prediction_scores.view(-1, 2), e_labels.view(-1)) + # losses['mlm_v'] = masked_lm_loss.detach() + # losses['mlm'] = masked_lm_loss * hparams['lambda_mlm'] + + return losses, outputs + \ No newline at end of file diff --git a/text_to_speech/tasks/tts/ps_flow.py b/text_to_speech/tasks/tts/ps_flow.py new file mode 100644 index 0000000000000000000000000000000000000000..a52fcd7bc3887f479d02e9ffbf03cb6e717a89d5 --- /dev/null +++ b/text_to_speech/tasks/tts/ps_flow.py @@ -0,0 +1,135 @@ +import torch +from text_to_speech.modules.tts.portaspeech.portaspeech_flow import PortaSpeechFlow +from tasks.tts.fs import FastSpeechTask +from tasks.tts.ps import PortaSpeechTask +from text_to_speech.utils.audio.pitch.utils import denorm_f0 +from text_to_speech.utils.commons.hparams import hparams + + +class PortaSpeechFlowTask(PortaSpeechTask): + def __init__(self): + super().__init__() + self.training_post_glow = False + + def build_tts_model(self): + ph_dict_size = len(self.token_encoder) + word_dict_size = len(self.word_encoder) + self.model = PortaSpeechFlow(ph_dict_size, word_dict_size, hparams) + + def _training_step(self, sample, batch_idx, opt_idx): + self.training_post_glow = self.global_step >= hparams['post_glow_training_start'] \ + and hparams['use_post_flow'] + if hparams['two_stage'] and \ + ((opt_idx == 0 and self.training_post_glow) or (opt_idx == 1 and not self.training_post_glow)): + return None + loss_output, _ = self.run_model(sample) + total_loss = sum([v for v in loss_output.values() if isinstance(v, torch.Tensor) and v.requires_grad]) + loss_output['batch_size'] = sample['txt_tokens'].size()[0] + if 'postflow' in loss_output and loss_output['postflow'] is None: + return None + return total_loss, loss_output + + def run_model(self, sample, infer=False, *args, **kwargs): + if not infer: + training_post_glow = self.training_post_glow + spk_embed = sample.get('spk_embed') + spk_id = sample.get('spk_ids') + output = self.model(sample['txt_tokens'], + sample['word_tokens'], + ph2word=sample['ph2word'], + mel2word=sample['mel2word'], + mel2ph=sample['mel2ph'], + word_len=sample['word_lengths'].max(), + tgt_mels=sample['mels'], + pitch=sample.get('pitch'), + spk_embed=spk_embed, + spk_id=spk_id, + infer=False, + forward_post_glow=training_post_glow, + two_stage=hparams['two_stage'], + global_step=self.global_step, + bert_feats=sample.get('bert_feats')) + losses = {} + 
self.add_mel_loss(output['mel_out'], sample['mels'], losses) + if (training_post_glow or not hparams['two_stage']) and hparams['use_post_flow']: + losses['postflow'] = output['postflow'] + losses['l1'] = losses['l1'].detach() + losses['ssim'] = losses['ssim'].detach() + if not training_post_glow or not hparams['two_stage'] or not self.training: + losses['kl'] = output['kl'] + if self.global_step < hparams['kl_start_steps']: + losses['kl'] = losses['kl'].detach() + else: + losses['kl'] = torch.clamp(losses['kl'], min=hparams['kl_min']) + losses['kl'] = losses['kl'] * hparams['lambda_kl'] + if hparams['dur_level'] == 'word': + self.add_dur_loss( + output['dur'], sample['mel2word'], sample['word_lengths'], sample['txt_tokens'], losses) + self.get_attn_stats(output['attn'], sample, losses) + else: + super().add_dur_loss(output['dur'], sample['mel2ph'], sample['txt_tokens'], losses) + return losses, output + else: + use_gt_dur = kwargs.get('infer_use_gt_dur', hparams['use_gt_dur']) + forward_post_glow = self.global_step >= hparams['post_glow_training_start'] + 1000 \ + and hparams['use_post_flow'] + spk_embed = sample.get('spk_embed') + spk_id = sample.get('spk_ids') + output = self.model( + sample['txt_tokens'], + sample['word_tokens'], + ph2word=sample['ph2word'], + word_len=sample['word_lengths'].max(), + pitch=sample.get('pitch'), + mel2ph=sample['mel2ph'] if use_gt_dur else None, + mel2word=sample['mel2word'] if hparams['profile_infer'] or hparams['use_gt_dur'] else None, + infer=True, + forward_post_glow=forward_post_glow, + spk_embed=spk_embed, + spk_id=spk_id, + two_stage=hparams['two_stage'], + bert_feats=sample.get('bert_feats')) + return output + + def validation_step(self, sample, batch_idx): + self.training_post_glow = self.global_step >= hparams['post_glow_training_start'] \ + and hparams['use_post_flow'] + return super().validation_step(sample, batch_idx) + + def save_valid_result(self, sample, batch_idx, model_out): + super(PortaSpeechFlowTask, self).save_valid_result(sample, batch_idx, model_out) + sr = hparams['audio_sample_rate'] + f0_gt = None + if sample.get('f0') is not None: + f0_gt = denorm_f0(sample['f0'][0].cpu(), sample['uv'][0].cpu()) + if self.global_step > 0: + # save FVAE result + if hparams['use_post_flow']: + wav_pred = self.vocoder.spec2wav(model_out['mel_out_fvae'][0].cpu(), f0=f0_gt) + self.logger.add_audio(f'wav_fvae_{batch_idx}', wav_pred, self.global_step, sr) + self.plot_mel(batch_idx, sample['mels'], model_out['mel_out_fvae'][0], + f'mel_fvae_{batch_idx}', f0s=f0_gt) + + def build_optimizer(self, model): + if hparams['two_stage'] and hparams['use_post_flow']: + self.optimizer = torch.optim.AdamW( + [p for name, p in self.model.named_parameters() if 'post_flow' not in name], + lr=hparams['lr'], + betas=(hparams['optimizer_adam_beta1'], hparams['optimizer_adam_beta2']), + weight_decay=hparams['weight_decay']) + self.post_flow_optimizer = torch.optim.AdamW( + self.model.post_flow.parameters(), + lr=hparams['post_flow_lr'], + betas=(hparams['optimizer_adam_beta1'], hparams['optimizer_adam_beta2']), + weight_decay=hparams['weight_decay']) + return [self.optimizer, self.post_flow_optimizer] + else: + self.optimizer = torch.optim.AdamW( + self.model.parameters(), + lr=hparams['lr'], + betas=(hparams['optimizer_adam_beta1'], hparams['optimizer_adam_beta2']), + weight_decay=hparams['weight_decay']) + return [self.optimizer] + + def build_scheduler(self, optimizer): + return FastSpeechTask.build_scheduler(self, optimizer[0]) \ No newline at end of file diff --git 
a/text_to_speech/tasks/tts/speech_base.py b/text_to_speech/tasks/tts/speech_base.py new file mode 100644 index 0000000000000000000000000000000000000000..7b5f85edc9b827806ca565b617e1b72149e09943 --- /dev/null +++ b/text_to_speech/tasks/tts/speech_base.py @@ -0,0 +1,373 @@
+import filecmp
+import os
+import traceback
+import numpy as np
+import pandas as pd
+import torch
+import torch.distributed as dist
+import torch.nn.functional as F
+import torch.optim
+import torch.utils.data
+import yaml
+from tqdm import tqdm
+import utils
+from tasks.tts.dataset_utils import BaseSpeechDataset
+from tasks.tts.tts_utils import parse_mel_losses, parse_dataset_configs, load_data_preprocessor, load_data_binarizer
+from tasks.tts.vocoder_infer.base_vocoder import BaseVocoder, get_vocoder_cls
+from text_to_speech.utils.audio.align import mel2token_to_dur
+from text_to_speech.utils.audio.io import save_wav
+from text_to_speech.utils.audio.pitch_extractors import extract_pitch_simple
+from text_to_speech.utils.commons.base_task import BaseTask
+from text_to_speech.utils.commons.ckpt_utils import load_ckpt
+from text_to_speech.utils.commons.dataset_utils import data_loader, BaseConcatDataset
+from text_to_speech.utils.commons.hparams import hparams
+from text_to_speech.utils.commons.multiprocess_utils import MultiprocessManager
+from text_to_speech.utils.commons.tensor_utils import tensors_to_scalars
+from text_to_speech.utils.metrics.ssim import ssim
+from text_to_speech.utils.nn.model_utils import print_arch
+from text_to_speech.utils.nn.schedulers import RSQRTSchedule, NoneSchedule, WarmupSchedule
+from text_to_speech.utils.nn.seq_utils import weights_nonzero_speech
+from text_to_speech.utils.plot.plot import spec_to_figure
+from text_to_speech.utils.text.text_encoder import build_token_encoder
+import matplotlib.pyplot as plt
+
+
+class SpeechBaseTask(BaseTask):
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self.dataset_cls = BaseSpeechDataset
+        self.vocoder = None
+        data_dir = hparams['binary_data_dir']
+        if not hparams['use_word_input']:
+            self.token_encoder = build_token_encoder(f'{data_dir}/phone_set.json')
+        else:
+            self.token_encoder = build_token_encoder(f'{data_dir}/word_set.json')
+        self.padding_idx = self.token_encoder.pad()
+        self.eos_idx = self.token_encoder.eos()
+        self.seg_idx = self.token_encoder.seg()
+        self.saving_result_pool = None
+        self.saving_results_futures = None
+        self.mel_losses = parse_mel_losses()
+        self.max_tokens, self.max_sentences, \
+            self.max_valid_tokens, self.max_valid_sentences = parse_dataset_configs()
+
+    ##########################
+    # datasets
+    ##########################
+    @data_loader
+    def train_dataloader(self):
+        if hparams['train_sets'] != '':
+            train_sets = hparams['train_sets'].split("|")
+            # check if all train_sets have the same spk map and dictionary
+            binary_data_dir = hparams['binary_data_dir']
+            file_to_cmp = ['phone_set.json']
+            if os.path.exists(f'{binary_data_dir}/word_set.json'):
+                file_to_cmp.append('word_set.json')
+            if hparams['use_spk_id']:
+                file_to_cmp.append('spk_map.json')
+            for f in file_to_cmp:
+                for ds_name in train_sets:
+                    base_file = os.path.join(binary_data_dir, f)
+                    ds_file = os.path.join(ds_name, f)
+                    assert filecmp.cmp(base_file, ds_file), \
+                        f'{f} in {ds_name} is not the same as that in {binary_data_dir}.'
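+            # NOTE (illustrative): `train_sets` is a "|"-separated list of extra binarized
+            # data dirs, e.g. a config entry such as
+            #     train_sets: "data/binary/dataset_a|data/binary/dataset_b"  # hypothetical paths
+            # The filecmp checks above only assert that every extra dataset ships the same
+            # phone_set.json / word_set.json (and spk_map.json when use_spk_id is true) as
+            # `binary_data_dir`; they do not merge or rebuild those dictionaries.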
+ train_dataset = BaseConcatDataset([ + self.dataset_cls(prefix='train', shuffle=True, data_dir=ds_name) for ds_name in train_sets]) + else: + train_dataset = self.dataset_cls(prefix=hparams['train_set_name'], shuffle=True) + return self.build_dataloader(train_dataset, True, self.max_tokens, self.max_sentences, + endless=hparams['endless_ds']) + + @data_loader + def val_dataloader(self): + valid_dataset = self.dataset_cls(prefix=hparams['valid_set_name'], shuffle=False) + return self.build_dataloader(valid_dataset, False, self.max_valid_tokens, self.max_valid_sentences, + batch_by_size=False) + + @data_loader + def test_dataloader(self): + test_dataset = self.dataset_cls(prefix=hparams['test_set_name'], shuffle=False) + self.test_dl = self.build_dataloader( + test_dataset, False, self.max_valid_tokens, self.max_valid_sentences, batch_by_size=False) + return self.test_dl + + def build_dataloader(self, dataset, shuffle, max_tokens=None, max_sentences=None, + required_batch_size_multiple=-1, endless=False, batch_by_size=True): + devices_cnt = torch.cuda.device_count() + if devices_cnt == 0: + devices_cnt = 1 + if required_batch_size_multiple == -1: + required_batch_size_multiple = devices_cnt + + def shuffle_batches(batches): + np.random.shuffle(batches) + return batches + + if max_tokens is not None: + max_tokens *= devices_cnt + if max_sentences is not None: + max_sentences *= devices_cnt + indices = dataset.ordered_indices() + if batch_by_size: + batch_sampler = utils.commons.dataset_utils.batch_by_size( + indices, dataset.num_tokens, max_tokens=max_tokens, max_sentences=max_sentences, + required_batch_size_multiple=required_batch_size_multiple, + ) + else: + batch_sampler = [] + for i in range(0, len(indices), max_sentences): + batch_sampler.append(indices[i:i + max_sentences]) + + if shuffle: + batches = shuffle_batches(list(batch_sampler)) + if endless: + batches = [b for _ in range(1000) for b in shuffle_batches(list(batch_sampler))] + else: + batches = batch_sampler + if endless: + batches = [b for _ in range(1000) for b in batches] + num_workers = dataset.num_workers + if self.trainer.use_ddp: + num_replicas = dist.get_world_size() + rank = dist.get_rank() + batches = [x[rank::num_replicas] for x in batches if len(x) % num_replicas == 0] + return torch.utils.data.DataLoader(dataset, + collate_fn=dataset.collater, + batch_sampler=batches, + num_workers=num_workers, + pin_memory=False) + + ########################## + # scheduler and optimizer + ########################## + def build_model(self): + self.build_tts_model() + if hparams['load_ckpt'] != '': + load_ckpt(self.model, hparams['load_ckpt']) + print_arch(self.model) + return self.model + + def build_tts_model(self): + raise NotImplementedError + + def build_scheduler(self, optimizer): + if hparams['scheduler'] == 'rsqrt': + return RSQRTSchedule(optimizer, hparams['lr'], hparams['warmup_updates'], hparams['hidden_size']) + elif hparams['scheduler'] == 'warmup': + return WarmupSchedule(optimizer, hparams['lr'], hparams['warmup_updates']) + elif hparams['scheduler'] == 'step_lr': + return torch.optim.lr_scheduler.StepLR( + optimizer=optimizer, step_size=500, gamma=0.998) + else: + return NoneSchedule(optimizer, hparams['lr']) + + def build_optimizer(self, model): + self.optimizer = optimizer = torch.optim.AdamW( + model.parameters(), + lr=hparams['lr'], + betas=(hparams['optimizer_adam_beta1'], hparams['optimizer_adam_beta2']), + weight_decay=hparams['weight_decay']) + + return optimizer + + ########################## + # training 
and validation + ########################## + def _training_step(self, sample, batch_idx, _): + loss_output, _ = self.run_model(sample) + total_loss = sum([v for v in loss_output.values() if isinstance(v, torch.Tensor) and v.requires_grad]) + loss_output['batch_size'] = sample['txt_tokens'].size()[0] + return total_loss, loss_output + + def run_model(self, sample, infer=False): + """ + + :param sample: a batch of data + :param infer: bool, run in infer mode + :return: + if not infer: + return losses, model_out + if infer: + return model_out + """ + raise NotImplementedError + + def validation_start(self): + self.vocoder = get_vocoder_cls(hparams['vocoder'])() + + def validation_step(self, sample, batch_idx): + outputs = {} + outputs['losses'] = {} + outputs['losses'], model_out = self.run_model(sample) + outputs['total_loss'] = sum(outputs['losses'].values()) + outputs['nsamples'] = sample['nsamples'] + outputs = tensors_to_scalars(outputs) + if self.global_step % hparams['valid_infer_interval'] == 0 \ + and batch_idx < hparams['num_valid_plots']: + self.save_valid_result(sample, batch_idx, model_out) + return outputs + + def validation_end(self, outputs): + self.vocoder = None + return super(SpeechBaseTask, self).validation_end(outputs) + + def save_valid_result(self, sample, batch_idx, model_out): + raise NotImplementedError + + ########################## + # losses + ########################## + def add_mel_loss(self, mel_out, target, losses, postfix=''): + for loss_name, lambd in self.mel_losses.items(): + losses[f'{loss_name}{postfix}'] = getattr(self, f'{loss_name}_loss')(mel_out, target) * lambd + + def l1_loss(self, decoder_output, target): + # decoder_output : B x T x n_mel + # target : B x T x n_mel + l1_loss = F.l1_loss(decoder_output, target, reduction='none') + weights = weights_nonzero_speech(target) + l1_loss = (l1_loss * weights).sum() / weights.sum() + return l1_loss + + def mse_loss(self, decoder_output, target): + # decoder_output : B x T x n_mel + # target : B x T x n_mel + assert decoder_output.shape == target.shape + mse_loss = F.mse_loss(decoder_output, target, reduction='none') + weights = weights_nonzero_speech(target) + mse_loss = (mse_loss * weights).sum() / weights.sum() + return mse_loss + + def ssim_loss(self, decoder_output, target, bias=6.0): + # decoder_output : B x T x n_mel + # target : B x T x n_mel + assert decoder_output.shape == target.shape + weights = weights_nonzero_speech(target) + decoder_output = decoder_output[:, None] + bias + target = target[:, None] + bias + ssim_loss = 1 - ssim(decoder_output, target, size_average=False) + ssim_loss = (ssim_loss * weights).sum() / weights.sum() + return ssim_loss + + def plot_mel(self, batch_idx, spec_out, spec_gt=None, name=None, title='', f0s=None, dur_info=None): + vmin = hparams['mel_vmin'] + vmax = hparams['mel_vmax'] + if len(spec_out.shape) == 3: + spec_out = spec_out[0] + if isinstance(spec_out, torch.Tensor): + spec_out = spec_out.cpu().numpy() + if spec_gt is not None: + if len(spec_gt.shape) == 3: + spec_gt = spec_gt[0] + if isinstance(spec_gt, torch.Tensor): + spec_gt = spec_gt.cpu().numpy() + max_len = max(len(spec_gt), len(spec_out)) + if max_len - len(spec_gt) > 0: + spec_gt = np.pad(spec_gt, [[0, max_len - len(spec_gt)], [0, 0]], mode='constant', + constant_values=vmin) + if max_len - len(spec_out) > 0: + spec_out = np.pad(spec_out, [[0, max_len - len(spec_out)], [0, 0]], mode='constant', + constant_values=vmin) + spec_out = np.concatenate([spec_out, spec_gt], -1) + name = 
f'mel_val_{batch_idx}' if name is None else name + self.logger.add_figure(name, spec_to_figure( + spec_out, vmin, vmax, title=title, f0s=f0s, dur_info=dur_info), self.global_step) + + ########################## + # testing + ########################## + def test_start(self): + self.saving_result_pool = MultiprocessManager(int(os.getenv('N_PROC', os.cpu_count()))) + self.saving_results_futures = [] + self.gen_dir = os.path.join( + hparams['work_dir'], f'generated_{self.trainer.global_step}_{hparams["gen_dir_name"]}') + self.vocoder: BaseVocoder = get_vocoder_cls(hparams['vocoder'])() + os.makedirs(self.gen_dir, exist_ok=True) + os.makedirs(f'{self.gen_dir}/wavs', exist_ok=True) + os.makedirs(f'{self.gen_dir}/plot', exist_ok=True) + if hparams.get('save_mel_npy', False): + os.makedirs(f'{self.gen_dir}/mel_npy', exist_ok=True) + + def test_step(self, sample, batch_idx): + """ + + :param sample: + :param batch_idx: + :return: + """ + assert sample['txt_tokens'].shape[0] == 1, 'only support batch_size=1 in inference' + outputs = self.run_model(sample, infer=True) + text = sample['text'][0] + item_name = sample['item_name'][0] + tokens = sample['txt_tokens'][0].cpu().numpy() + mel_gt = sample['mels'][0].cpu().numpy() + mel_pred = outputs['mel_out'][0].cpu().numpy() + str_phs = self.token_encoder.decode(tokens, strip_padding=True) + base_fn = f'[{self.results_id:06d}][{item_name.replace("%", "_")}][%s]' + if text is not None: + base_fn += text.replace(":", "$3A")[:80] + base_fn = base_fn.replace(' ', '_') + gen_dir = self.gen_dir + wav_pred = self.vocoder.spec2wav(mel_pred) + self.saving_result_pool.add_job(self.save_result, args=[ + wav_pred, mel_pred, base_fn % 'P', gen_dir, str_phs]) + if hparams['save_gt']: + wav_gt = self.vocoder.spec2wav(mel_gt) + self.saving_result_pool.add_job(self.save_result, args=[ + wav_gt, mel_gt, base_fn % 'G', gen_dir, str_phs]) + print(f"Pred_shape: {mel_pred.shape}, gt_shape: {mel_gt.shape}") + return { + 'item_name': item_name, + 'text': text, + 'ph_tokens': self.token_encoder.decode(tokens.tolist()), + 'wav_fn_pred': base_fn % 'P', + 'wav_fn_gt': base_fn % 'G', + } + + @staticmethod + def save_result(wav_out, mel, base_fn, gen_dir, str_phs=None, mel2ph=None, alignment=None): + save_wav(wav_out, f'{gen_dir}/wavs/{base_fn}.wav', hparams['audio_sample_rate'], + norm=hparams['out_wav_norm']) + fig = plt.figure(figsize=(14, 10)) + spec_vmin = hparams['mel_vmin'] + spec_vmax = hparams['mel_vmax'] + heatmap = plt.pcolor(mel.T, vmin=spec_vmin, vmax=spec_vmax) + fig.colorbar(heatmap) + try: + f0 = extract_pitch_simple(wav_out) + f0 = f0 / 10 * (f0 > 0) + plt.plot(f0, c='white', linewidth=1, alpha=0.6) + if mel2ph is not None and str_phs is not None: + decoded_txt = str_phs.split(" ") + dur = mel2token_to_dur(torch.LongTensor(mel2ph)[None, :], len(decoded_txt))[0].numpy() + dur = [0] + list(np.cumsum(dur)) + for i in range(len(dur) - 1): + shift = (i % 20) + 1 + plt.text(dur[i], shift, decoded_txt[i]) + plt.hlines(shift, dur[i], dur[i + 1], colors='b' if decoded_txt[i] != '|' else 'black') + plt.vlines(dur[i], 0, 5, colors='b' if decoded_txt[i] != '|' else 'black', + alpha=1, linewidth=1) + plt.tight_layout() + plt.savefig(f'{gen_dir}/plot/{base_fn}.png', format='png') + plt.close(fig) + if hparams.get('save_mel_npy', False): + np.save(f'{gen_dir}/mel_npy/{base_fn}', mel) + if alignment is not None: + fig, ax = plt.subplots(figsize=(12, 16)) + im = ax.imshow(alignment, aspect='auto', origin='lower', + interpolation='none') + decoded_txt = str_phs.split(" ") + 
ax.set_yticks(np.arange(len(decoded_txt))) + ax.set_yticklabels(list(decoded_txt), fontsize=6) + fig.colorbar(im, ax=ax) + fig.savefig(f'{gen_dir}/attn_plot/{base_fn}_attn.png', format='png') + plt.close(fig) + except Exception: + traceback.print_exc() + return None + + def test_end(self, outputs): + pd.DataFrame(outputs).to_csv(f'{self.gen_dir}/meta.csv') + for _1, _2 in tqdm(self.saving_result_pool.get_results(), total=len(self.saving_result_pool)): + pass + return {} diff --git a/text_to_speech/tasks/tts/synta.py b/text_to_speech/tasks/tts/synta.py new file mode 100644 index 0000000000000000000000000000000000000000..9a9e3ba127549e3b95d989cca5f52caa4766ec94 --- /dev/null +++ b/text_to_speech/tasks/tts/synta.py @@ -0,0 +1,25 @@ +import os +import torch +import torch.nn.functional as F +from torch import nn + +from text_to_speech.modules.tts.syntaspeech.syntaspeech import SyntaSpeech +from tasks.tts.ps_adv import PortaSpeechAdvTask +from text_to_speech.utils.commons.hparams import hparams + + +class SyntaSpeechTask(PortaSpeechAdvTask): + def build_tts_model(self): + ph_dict_size = len(self.token_encoder) + word_dict_size = len(self.word_encoder) + self.model = SyntaSpeech(ph_dict_size, word_dict_size, hparams) + + self.gen_params = [p for p in self.model.parameters() if p.requires_grad] + self.dp_params = [p for k, p in self.model.named_parameters() if (('dur_predictor' in k) and p.requires_grad)] + self.gen_params_except_dp = [p for k, p in self.model.named_parameters() if (('dur_predictor' not in k) and p.requires_grad)] + self.bert_params = [p for k, p in self.model.named_parameters() if (('bert' in k) and p.requires_grad)] + self.gen_params_except_bert_and_dp = [p for k, p in self.model.named_parameters() if ('dur_predictor' not in k) and ('bert' not in k) and p.requires_grad ] + + self.use_bert = True if len(self.bert_params) > 0 else False + + \ No newline at end of file diff --git a/text_to_speech/tasks/tts/synta_mlm.py b/text_to_speech/tasks/tts/synta_mlm.py new file mode 100644 index 0000000000000000000000000000000000000000..eb5ee529dbb41ccb8a8c506fcbb2c24078893a80 --- /dev/null +++ b/text_to_speech/tasks/tts/synta_mlm.py @@ -0,0 +1,25 @@ +import os +import torch +import torch.nn.functional as F +from torch import nn + +from text_to_speech.modules.tts.syntaspeech.syntaspeech import SyntaSpeech +from tasks.tts.ps_adv_mlm import PortaSpeechAdvMLMTask +from text_to_speech.utils.commons.hparams import hparams + + +class SyntaSpeechMLMTask(PortaSpeechAdvMLMTask): + def build_tts_model(self): + ph_dict_size = len(self.token_encoder) + word_dict_size = len(self.word_encoder) + self.model = SyntaSpeech(ph_dict_size, word_dict_size, hparams) + + self.gen_params = [p for p in self.model.parameters() if p.requires_grad] + self.dp_params = [p for k, p in self.model.named_parameters() if (('dur_predictor' in k) and p.requires_grad)] + self.gen_params_except_dp = [p for k, p in self.model.named_parameters() if (('dur_predictor' not in k) and p.requires_grad)] + self.bert_params = [p for k, p in self.model.named_parameters() if (('bert' in k) and p.requires_grad)] + self.gen_params_except_bert_and_dp = [p for k, p in self.model.named_parameters() if ('dur_predictor' not in k) and ('bert' not in k) and p.requires_grad ] + + self.use_bert = True if len(self.bert_params) > 0 else False + + \ No newline at end of file diff --git a/text_to_speech/tasks/tts/tts_utils.py b/text_to_speech/tasks/tts/tts_utils.py new file mode 100644 index 
0000000000000000000000000000000000000000..47e654c03eaf9c50ae0bb3c97ecd661666a1a6b1 --- /dev/null +++ b/text_to_speech/tasks/tts/tts_utils.py @@ -0,0 +1,54 @@ +import importlib + +from text_to_speech.data_gen.tts.base_binarizer import BaseBinarizer +from text_to_speech.data_gen.tts.base_preprocess import BasePreprocessor +from text_to_speech.data_gen.tts.txt_processors.base_text_processor import get_txt_processor_cls +from text_to_speech.utils.commons.hparams import hparams + + +def parse_dataset_configs(): + max_tokens = hparams['max_tokens'] + max_sentences = hparams['max_sentences'] + max_valid_tokens = hparams['max_valid_tokens'] + if max_valid_tokens == -1: + hparams['max_valid_tokens'] = max_valid_tokens = max_tokens + max_valid_sentences = hparams['max_valid_sentences'] + if max_valid_sentences == -1: + hparams['max_valid_sentences'] = max_valid_sentences = max_sentences + return max_tokens, max_sentences, max_valid_tokens, max_valid_sentences + + +def parse_mel_losses(): + mel_losses = hparams['mel_losses'].split("|") + loss_and_lambda = {} + for i, l in enumerate(mel_losses): + if l == '': + continue + if ':' in l: + l, lbd = l.split(":") + lbd = float(lbd) + else: + lbd = 1.0 + loss_and_lambda[l] = lbd + print("| Mel losses:", loss_and_lambda) + return loss_and_lambda + + +def load_data_preprocessor(): + preprocess_cls = hparams["preprocess_cls"] + pkg = ".".join(preprocess_cls.split(".")[:-1]) + cls_name = preprocess_cls.split(".")[-1] + preprocessor: BasePreprocessor = getattr(importlib.import_module(pkg), cls_name)() + preprocess_args = {} + preprocess_args.update(hparams['preprocess_args']) + return preprocessor, preprocess_args + + +def load_data_binarizer(): + binarizer_cls = hparams['binarizer_cls'] + pkg = ".".join(binarizer_cls.split(".")[:-1]) + cls_name = binarizer_cls.split(".")[-1] + binarizer: BaseBinarizer = getattr(importlib.import_module(pkg), cls_name)() + binarization_args = {} + binarization_args.update(hparams['binarization_args']) + return binarizer, binarization_args diff --git a/text_to_speech/tasks/tts/vocoder_infer/__init__.py b/text_to_speech/tasks/tts/vocoder_infer/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1868dc2601363d6843f2b02a82e6fa2de375408b --- /dev/null +++ b/text_to_speech/tasks/tts/vocoder_infer/__init__.py @@ -0,0 +1,2 @@ +from . import hifigan +from . 
import pwg \ No newline at end of file diff --git a/text_to_speech/tasks/tts/vocoder_infer/base_vocoder.py b/text_to_speech/tasks/tts/vocoder_infer/base_vocoder.py new file mode 100644 index 0000000000000000000000000000000000000000..a332205b553a0a95b9529c78c1ab5e49099b5d41 --- /dev/null +++ b/text_to_speech/tasks/tts/vocoder_infer/base_vocoder.py @@ -0,0 +1,63 @@ +import librosa +from text_to_speech.utils.audio import librosa_wav2spec +from text_to_speech.utils.commons.hparams import hparams +import numpy as np + +REGISTERED_VOCODERS = {} + + +def register_vocoder(name): + def _f(cls): + REGISTERED_VOCODERS[name] = cls + return cls + + return _f + + +def get_vocoder_cls(vocoder_name): + return REGISTERED_VOCODERS.get(vocoder_name) + + +class BaseVocoder: + def spec2wav(self, mel): + """ + + :param mel: [T, 80] + :return: wav: [T'] + """ + + raise NotImplementedError + + @staticmethod + def wav2spec(wav_fn): + """ + + :param wav_fn: str + :return: wav, mel: [T, 80] + """ + wav_spec_dict = librosa_wav2spec(wav_fn, fft_size=hparams['fft_size'], + hop_size=hparams['hop_size'], + win_length=hparams['win_size'], + num_mels=hparams['audio_num_mel_bins'], + fmin=hparams['fmin'], + fmax=hparams['fmax'], + sample_rate=hparams['audio_sample_rate'], + loud_norm=hparams['loud_norm']) + wav = wav_spec_dict['wav'] + mel = wav_spec_dict['mel'] + return wav, mel + + @staticmethod + def wav2mfcc(wav_fn): + fft_size = hparams['fft_size'] + hop_size = hparams['hop_size'] + win_length = hparams['win_size'] + sample_rate = hparams['audio_sample_rate'] + wav, _ = librosa.core.load(wav_fn, sr=sample_rate) + mfcc = librosa.feature.mfcc(y=wav, sr=sample_rate, n_mfcc=13, + n_fft=fft_size, hop_length=hop_size, + win_length=win_length, pad_mode="constant", power=1.0) + mfcc_delta = librosa.feature.delta(mfcc, order=1) + mfcc_delta_delta = librosa.feature.delta(mfcc, order=2) + mfcc = np.concatenate([mfcc, mfcc_delta, mfcc_delta_delta]).T + return mfcc diff --git a/text_to_speech/tasks/tts/vocoder_infer/hifigan.py b/text_to_speech/tasks/tts/vocoder_infer/hifigan.py new file mode 100644 index 0000000000000000000000000000000000000000..bfae9d5afb89ed4ca411285f6207c98184384d1c --- /dev/null +++ b/text_to_speech/tasks/tts/vocoder_infer/hifigan.py @@ -0,0 +1,31 @@ +import torch +from text_to_speech.modules.vocoder.hifigan.hifigan import HifiGanGenerator +from tasks.tts.vocoder_infer.base_vocoder import register_vocoder, BaseVocoder +from text_to_speech.utils.commons.ckpt_utils import load_ckpt +from text_to_speech.utils.commons.hparams import set_hparams, hparams +from text_to_speech.utils.commons.meters import Timer + +total_time = 0 + + +@register_vocoder('HifiGAN') +class HifiGAN(BaseVocoder): + def __init__(self): + base_dir = hparams['vocoder_ckpt'] + config_path = f'{base_dir}/config.yaml' + self.config = config = set_hparams(config_path, global_hparams=False) + self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + self.model = HifiGanGenerator(config) + load_ckpt(self.model, base_dir, 'model_gen') + self.model.to(self.device) + self.model.eval() + + def spec2wav(self, mel, **kwargs): + device = self.device + with torch.no_grad(): + c = torch.FloatTensor(mel).unsqueeze(0).to(device) + c = c.transpose(2, 1) + with Timer('hifigan', enable=hparams['profile_infer']): + y = self.model(c).view(-1) + wav_out = y.cpu().numpy() + return wav_out \ No newline at end of file diff --git a/text_to_speech/tasks/tts/vocoder_infer/pwg.py b/text_to_speech/tasks/tts/vocoder_infer/pwg.py new file mode 100644 index 
0000000000000000000000000000000000000000..64db778d3cc32fcad0647f03db4b60feb14f5f0d --- /dev/null +++ b/text_to_speech/tasks/tts/vocoder_infer/pwg.py @@ -0,0 +1,32 @@ +import torch +from text_to_speech.modules.vocoder.parallel_wavegan.models.parallel_wavegan import ParallelWaveGANGenerator +from tasks.tts.vocoder_infer.base_vocoder import register_vocoder, BaseVocoder +from text_to_speech.utils.commons.ckpt_utils import load_ckpt +from text_to_speech.utils.commons.hparams import set_hparams, hparams +from text_to_speech.utils.commons.meters import Timer + +total_time = 0 + + +@register_vocoder('PWG') +class PWG(BaseVocoder): + def __init__(self): + base_dir = hparams['vocoder_ckpt'] + config_path = f'{base_dir}/config.yaml' + self.config = config = set_hparams(config_path, global_hparams=False) + self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + self.model = ParallelWaveGANGenerator(**config["generator_params"]) + load_ckpt(self.model, base_dir, 'model_gen') + self.model.to(self.device) + self.model.eval() + + def spec2wav(self, mel, **kwargs): + device = self.device + with torch.no_grad(): + c = torch.FloatTensor(mel).unsqueeze(0).to(device) + c = c.transpose(2, 1) # [B, C, T] + z = None + with Timer('pwg', enable=hparams['profile_infer']): + y = self.model(z, c).view(-1) + wav_out = y.cpu().numpy() + return wav_out \ No newline at end of file diff --git a/text_to_speech/tasks/vocoder/dataset_utils.py b/text_to_speech/tasks/vocoder/dataset_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..406178f0729caba4c7d46962395b52cf73cb1bda --- /dev/null +++ b/text_to_speech/tasks/vocoder/dataset_utils.py @@ -0,0 +1,130 @@ +import numpy as np +import torch +import torch.distributed as dist +from torch.utils.data import DistributedSampler +from text_to_speech.utils.commons.dataset_utils import BaseDataset, collate_1d, collate_2d +from text_to_speech.utils.commons.hparams import hparams +from text_to_speech.utils.commons.indexed_datasets import IndexedDataset + + +class EndlessDistributedSampler(DistributedSampler): + def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True): + if num_replicas is None: + if not dist.is_available(): + raise RuntimeError("Requires distributed package to be available") + num_replicas = dist.get_world_size() + if rank is None: + if not dist.is_available(): + raise RuntimeError("Requires distributed package to be available") + rank = dist.get_rank() + self.dataset = dataset + self.num_replicas = num_replicas + self.rank = rank + self.epoch = 0 + self.shuffle = shuffle + + g = torch.Generator() + g.manual_seed(self.epoch) + if self.shuffle: + indices = [i for _ in range(1000) for i in torch.randperm( + len(self.dataset), generator=g).tolist()] + else: + indices = [i for _ in range(1000) for i in list(range(len(self.dataset)))] + indices = indices[:len(indices) // self.num_replicas * self.num_replicas] + indices = indices[self.rank::self.num_replicas] + self.indices = indices + + def __iter__(self): + return iter(self.indices) + + def __len__(self): + return len(self.indices) + + +class VocoderDataset(BaseDataset): + def __init__(self, prefix, shuffle=False): + super().__init__(shuffle) + self.hparams = hparams + self.prefix = prefix + self.data_dir = hparams['binary_data_dir'] + self.is_infer = prefix == 'test' + self.batch_max_frames = 0 if self.is_infer else hparams['max_samples'] // hparams['hop_size'] + self.hop_size = hparams['hop_size'] + self.indexed_ds = None + self.sizes = 
np.load(f'{self.data_dir}/{self.prefix}_lengths.npy') + self.avail_idxs = [idx for idx, s in enumerate(self.sizes) if s > self.batch_max_frames] + print(f"| {len(self.sizes) - len(self.avail_idxs)} short items are skipped in {prefix} set.") + self.sizes = [s for idx, s in enumerate(self.sizes) if s > self.batch_max_frames] + + def _get_item(self, index): + if self.indexed_ds is None: + self.indexed_ds = IndexedDataset(f'{self.data_dir}/{self.prefix}') + item = self.indexed_ds[index] + return item + + def __getitem__(self, index): + index = self.avail_idxs[index] + item = self._get_item(index) + sample = { + "id": index, + "item_name": item['item_name'], + "mel": torch.FloatTensor(item['mel']), + "wav": torch.FloatTensor(item['wav'].astype(np.float32)), + "pitch": torch.LongTensor(item['pitch']), + "f0": torch.FloatTensor(item['f0']) + } + return sample + + def collater(self, batch): + if len(batch) == 0: + return {} + + y_batch, c_batch, p_batch, f0_batch = [], [], [], [] + item_name = [] + for idx in range(len(batch)): + item_name.append(batch[idx]['item_name']) + x, c = batch[idx]['wav'], batch[idx]['mel'] + p, f0 = batch[idx]['pitch'], batch[idx]['f0'] + self._assert_ready_for_upsampling(x, c, self.hop_size) + if len(c) > self.batch_max_frames: + # randomly pickup with the batch_max_steps length of the part + batch_max_frames = self.batch_max_frames if self.batch_max_frames != 0 else len(c) - 1 + batch_max_steps = batch_max_frames * self.hop_size + interval_start = 0 + interval_end = len(c) - batch_max_frames + start_frame = np.random.randint(interval_start, interval_end) + start_step = start_frame * self.hop_size + y = x[start_step: start_step + batch_max_steps] + c = c[start_frame: start_frame + batch_max_frames] + p = p[start_frame: start_frame + batch_max_frames] + f0 = f0[start_frame: start_frame + batch_max_frames] + self._assert_ready_for_upsampling(y, c, self.hop_size) + else: + print(f"Removed short sample from batch (length={len(x)}).") + continue + y_batch += [y.reshape(-1, 1)] # [(T, 1), (T, 1), ...] + c_batch += [c] # [(T' C), (T' C), ...] + p_batch += [p] # [(T' C), (T' C), ...] + f0_batch += [f0] # [(T' C), (T' C), ...] 
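+            # note: the cropped wav y spans batch_max_frames * hop_size samples, matching the
+            # batch_max_frames mel/pitch/f0 frames kept above, so the sliced features stay aligned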
+ + # convert each batch to tensor, asuume that each item in batch has the same length + y_batch = collate_2d(y_batch, 0).transpose(2, 1) # (B, 1, T) + c_batch = collate_2d(c_batch, 0).transpose(2, 1) # (B, C, T') + p_batch = collate_1d(p_batch, 0) # (B, T') + f0_batch = collate_1d(f0_batch, 0) # (B, T') + + # make input noise signal batch tensor + z_batch = torch.randn(y_batch.size()) # (B, 1, T) + return { + 'z': z_batch, + 'mels': c_batch, + 'wavs': y_batch, + 'pitches': p_batch, + 'f0': f0_batch, + 'item_name': item_name + } + + @staticmethod + def _assert_ready_for_upsampling(x, c, hop_size): + """Assert the audio and feature lengths are correctly adjusted for upsamping.""" + assert len(x) == (len(c)) * hop_size diff --git a/text_to_speech/tasks/vocoder/hifigan.py b/text_to_speech/tasks/vocoder/hifigan.py new file mode 100644 index 0000000000000000000000000000000000000000..4fbb5037c6fa48178d51e5e685cf6191b150d214 --- /dev/null +++ b/text_to_speech/tasks/vocoder/hifigan.py @@ -0,0 +1,63 @@ +import torch.nn.functional as F +from torch import nn + +from text_to_speech.modules.vocoder.hifigan.hifigan import HifiGanGenerator, MultiPeriodDiscriminator, MultiScaleDiscriminator, \ + generator_loss, feature_loss, discriminator_loss +from text_to_speech.modules.vocoder.hifigan.mel_utils import mel_spectrogram +from text_to_speech.modules.vocoder.hifigan.stft_loss import MultiResolutionSTFTLoss +from tasks.vocoder.vocoder_base import VocoderBaseTask +from text_to_speech.utils.commons.hparams import hparams +from text_to_speech.utils.nn.model_utils import print_arch + + +class HifiGanTask(VocoderBaseTask): + def build_model(self): + self.model_gen = HifiGanGenerator(hparams) + self.model_disc = nn.ModuleDict() + self.model_disc['mpd'] = MultiPeriodDiscriminator() + self.model_disc['msd'] = MultiScaleDiscriminator() + self.stft_loss = MultiResolutionSTFTLoss() + print_arch(self.model_gen) + if hparams['load_ckpt'] != '': + self.load_ckpt(hparams['load_ckpt'], 'model_gen', 'model_gen', force=True, strict=True) + self.load_ckpt(hparams['load_ckpt'], 'model_disc', 'model_disc', force=True, strict=True) + return self.model_gen + + def _training_step(self, sample, batch_idx, optimizer_idx): + mel = sample['mels'] + y = sample['wavs'] + f0 = sample['f0'] + loss_output = {} + if optimizer_idx == 0: + ####################### + # Generator # + ####################### + y_ = self.model_gen(mel, f0) + y_mel = mel_spectrogram(y.squeeze(1), hparams).transpose(1, 2) + y_hat_mel = mel_spectrogram(y_.squeeze(1), hparams).transpose(1, 2) + loss_output['mel'] = F.l1_loss(y_hat_mel, y_mel) * hparams['lambda_mel'] + _, y_p_hat_g, fmap_f_r, fmap_f_g = self.model_disc['mpd'](y, y_, mel) + _, y_s_hat_g, fmap_s_r, fmap_s_g = self.model_disc['msd'](y, y_, mel) + loss_output['a_p'] = generator_loss(y_p_hat_g) * hparams['lambda_adv'] + loss_output['a_s'] = generator_loss(y_s_hat_g) * hparams['lambda_adv'] + if hparams['use_fm_loss']: + loss_output['fm_f'] = feature_loss(fmap_f_r, fmap_f_g) + loss_output['fm_s'] = feature_loss(fmap_s_r, fmap_s_g) + if hparams['use_ms_stft']: + loss_output['sc'], loss_output['mag'] = self.stft_loss(y.squeeze(1), y_.squeeze(1)) + self.y_ = y_.detach() + self.y_mel = y_mel.detach() + self.y_hat_mel = y_hat_mel.detach() + else: + ####################### + # Discriminator # + ####################### + y_ = self.y_ + # MPD + y_p_hat_r, y_p_hat_g, _, _ = self.model_disc['mpd'](y, y_.detach(), mel) + loss_output['r_p'], loss_output['f_p'] = discriminator_loss(y_p_hat_r, y_p_hat_g) + # MSD + 
y_s_hat_r, y_s_hat_g, _, _ = self.model_disc['msd'](y, y_.detach(), mel) + loss_output['r_s'], loss_output['f_s'] = discriminator_loss(y_s_hat_r, y_s_hat_g) + total_loss = sum(loss_output.values()) + return total_loss, loss_output diff --git a/text_to_speech/tasks/vocoder/vocoder_base.py b/text_to_speech/tasks/vocoder/vocoder_base.py new file mode 100644 index 0000000000000000000000000000000000000000..cf01d387c6eacc5a42728fe2003f9c48ff69e05f --- /dev/null +++ b/text_to_speech/tasks/vocoder/vocoder_base.py @@ -0,0 +1,137 @@ +import os +import torch +import torch.distributed as dist +from torch import nn +from torch.utils.data import DistributedSampler +from tasks.vocoder.dataset_utils import VocoderDataset, EndlessDistributedSampler +from text_to_speech.utils.audio.io import save_wav +from text_to_speech.utils.commons.base_task import BaseTask +from text_to_speech.utils.commons.dataset_utils import data_loader +from text_to_speech.utils.commons.hparams import hparams +from text_to_speech.utils.commons.tensor_utils import tensors_to_scalars + + +class VocoderBaseTask(BaseTask): + def __init__(self): + super(VocoderBaseTask, self).__init__() + self.max_sentences = hparams['max_sentences'] + self.max_valid_sentences = hparams['max_valid_sentences'] + if self.max_valid_sentences == -1: + hparams['max_valid_sentences'] = self.max_valid_sentences = self.max_sentences + self.dataset_cls = VocoderDataset + + @data_loader + def train_dataloader(self): + train_dataset = self.dataset_cls('train', shuffle=True) + return self.build_dataloader(train_dataset, True, self.max_sentences, hparams['endless_ds']) + + @data_loader + def val_dataloader(self): + valid_dataset = self.dataset_cls('test', shuffle=False) + return self.build_dataloader(valid_dataset, False, self.max_valid_sentences) + + @data_loader + def test_dataloader(self): + test_dataset = self.dataset_cls('test', shuffle=False) + return self.build_dataloader(test_dataset, False, self.max_valid_sentences) + + def build_dataloader(self, dataset, shuffle, max_sentences, endless=False): + world_size = 1 + rank = 0 + if dist.is_initialized(): + world_size = dist.get_world_size() + rank = dist.get_rank() + sampler_cls = DistributedSampler if not endless else EndlessDistributedSampler + train_sampler = sampler_cls( + dataset=dataset, + num_replicas=world_size, + rank=rank, + shuffle=shuffle, + ) + return torch.utils.data.DataLoader( + dataset=dataset, + shuffle=False, + collate_fn=dataset.collater, + batch_size=max_sentences, + num_workers=dataset.num_workers, + sampler=train_sampler, + pin_memory=True, + ) + + def build_optimizer(self, model): + optimizer_gen = torch.optim.AdamW(self.model_gen.parameters(), lr=hparams['lr'], + betas=[hparams['adam_b1'], hparams['adam_b2']]) + optimizer_disc = torch.optim.AdamW(self.model_disc.parameters(), lr=hparams['lr'], + betas=[hparams['adam_b1'], hparams['adam_b2']]) + return [optimizer_gen, optimizer_disc] + + def build_scheduler(self, optimizer): + return { + "gen": torch.optim.lr_scheduler.StepLR( + optimizer=optimizer[0], + **hparams["generator_scheduler_params"]), + "disc": torch.optim.lr_scheduler.StepLR( + optimizer=optimizer[1], + **hparams["discriminator_scheduler_params"]), + } + + def validation_step(self, sample, batch_idx): + outputs = {} + total_loss, loss_output = self._training_step(sample, batch_idx, 0) + outputs['losses'] = tensors_to_scalars(loss_output) + outputs['total_loss'] = tensors_to_scalars(total_loss) + + if self.global_step % hparams['valid_infer_interval'] == 0 and \ + batch_idx < 
10: + mels = sample['mels'] + y = sample['wavs'] + f0 = sample['f0'] + y_ = self.model_gen(mels, f0) + for idx, (wav_pred, wav_gt, item_name) in enumerate(zip(y_, y, sample["item_name"])): + wav_pred = wav_pred / wav_pred.abs().max() + if self.global_step == 0: + wav_gt = wav_gt / wav_gt.abs().max() + self.logger.add_audio(f'wav_{batch_idx}_{idx}_gt', wav_gt, self.global_step, + hparams['audio_sample_rate']) + self.logger.add_audio(f'wav_{batch_idx}_{idx}_pred', wav_pred, self.global_step, + hparams['audio_sample_rate']) + return outputs + + def test_start(self): + self.gen_dir = os.path.join(hparams['work_dir'], + f'generated_{self.trainer.global_step}_{hparams["gen_dir_name"]}') + os.makedirs(self.gen_dir, exist_ok=True) + + def test_step(self, sample, batch_idx): + mels = sample['mels'] + y = sample['wavs'] + f0 = sample['f0'] + loss_output = {} + y_ = self.model_gen(mels, f0) + gen_dir = os.path.join(hparams['work_dir'], f'generated_{self.trainer.global_step}_{hparams["gen_dir_name"]}') + os.makedirs(gen_dir, exist_ok=True) + for idx, (wav_pred, wav_gt, item_name) in enumerate(zip(y_, y, sample["item_name"])): + wav_gt = wav_gt.clamp(-1, 1) + wav_pred = wav_pred.clamp(-1, 1) + save_wav( + wav_gt.view(-1).cpu().float().numpy(), f'{gen_dir}/{item_name}_gt.wav', + hparams['audio_sample_rate']) + save_wav( + wav_pred.view(-1).cpu().float().numpy(), f'{gen_dir}/{item_name}_pred.wav', + hparams['audio_sample_rate']) + return loss_output + + def test_end(self, outputs): + return {} + + def on_before_optimization(self, opt_idx): + if opt_idx == 0: + nn.utils.clip_grad_norm_(self.model_gen.parameters(), hparams['generator_grad_norm']) + else: + nn.utils.clip_grad_norm_(self.model_disc.parameters(), hparams["discriminator_grad_norm"]) + + def on_after_optimization(self, epoch, batch_idx, optimizer, optimizer_idx): + if optimizer_idx == 0: + self.scheduler['gen'].step(self.global_step // hparams['accumulate_grad_batches']) + else: + self.scheduler['disc'].step(self.global_step // hparams['accumulate_grad_batches']) diff --git a/text_to_speech/utils/__pycache__/os_utils.cpython-38.pyc b/text_to_speech/utils/__pycache__/os_utils.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2ea313e7a83b038b1cc201b08e746224cfed42b4 Binary files /dev/null and b/text_to_speech/utils/__pycache__/os_utils.cpython-38.pyc differ diff --git a/text_to_speech/utils/audio/__init__.py b/text_to_speech/utils/audio/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..737ddaad53b77109b1edc05015e41bfde7651476 --- /dev/null +++ b/text_to_speech/utils/audio/__init__.py @@ -0,0 +1,82 @@ +import librosa +import numpy as np +import pyloudnorm as pyln + +from text_to_speech.utils.audio.vad import trim_long_silences + + +def librosa_pad_lr(x, fsize, fshift, pad_sides=1): + '''compute right padding (final frame) or both sides padding (first and final frames) + ''' + assert pad_sides in (1, 2) + # return int(fsize // 2) + pad = (x.shape[0] // fshift + 1) * fshift - x.shape[0] + if pad_sides == 1: + return 0, pad + else: + return pad // 2, pad // 2 + pad % 2 + + +def amp_to_db(x): + return 20 * np.log10(np.maximum(1e-5, x)) + + +def db_to_amp(x): + return 10.0 ** (x * 0.05) + + +def normalize(S, min_level_db): + return (S - min_level_db) / -min_level_db + + +def denormalize(D, min_level_db): + return (D * -min_level_db) + min_level_db + + +def librosa_wav2spec(wav_path, + fft_size=1024, + hop_size=256, + win_length=1024, + window="hann", + num_mels=80, + fmin=80, + fmax=-1, + 
eps=1e-6, + sample_rate=22050, + loud_norm=False, + trim_long_sil=False): + if isinstance(wav_path, str): + if trim_long_sil: + wav, _, _ = trim_long_silences(wav_path, sample_rate) + else: + wav, _ = librosa.core.load(wav_path, sr=sample_rate) + else: + wav = wav_path + + if loud_norm: + meter = pyln.Meter(sample_rate) # create BS.1770 meter + loudness = meter.integrated_loudness(wav) + wav = pyln.normalize.loudness(wav, loudness, -22.0) + if np.abs(wav).max() > 1: + wav = wav / np.abs(wav).max() + + # get amplitude spectrogram + x_stft = librosa.stft(wav, n_fft=fft_size, hop_length=hop_size, + win_length=win_length, window=window, pad_mode="constant") + linear_spc = np.abs(x_stft) # (n_bins, T) + + # get mel basis + fmin = 0 if fmin == -1 else fmin + fmax = sample_rate / 2 if fmax == -1 else fmax + mel_basis = librosa.filters.mel(sample_rate, fft_size, num_mels, fmin, fmax) + + # calculate mel spec + mel = mel_basis @ linear_spc + mel = np.log10(np.maximum(eps, mel)) # (n_mel_bins, T) + l_pad, r_pad = librosa_pad_lr(wav, fft_size, hop_size, 1) + wav = np.pad(wav, (l_pad, r_pad), mode='constant', constant_values=0.0) + wav = wav[:mel.shape[1] * hop_size] + + # log linear spec + linear_spc = np.log10(np.maximum(eps, linear_spc)) + return {'wav': wav, 'mel': mel.T, 'linear': linear_spc.T, 'mel_basis': mel_basis} diff --git a/text_to_speech/utils/audio/__pycache__/__init__.cpython-38.pyc b/text_to_speech/utils/audio/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3e9cf47e70ebee23070620e786a71691a2786458 Binary files /dev/null and b/text_to_speech/utils/audio/__pycache__/__init__.cpython-38.pyc differ diff --git a/text_to_speech/utils/audio/__pycache__/align.cpython-38.pyc b/text_to_speech/utils/audio/__pycache__/align.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6317c1cf11226236cdc249fc8a1aa569157d8764 Binary files /dev/null and b/text_to_speech/utils/audio/__pycache__/align.cpython-38.pyc differ diff --git a/text_to_speech/utils/audio/__pycache__/cwt.cpython-38.pyc b/text_to_speech/utils/audio/__pycache__/cwt.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9571b49e6e456be9ae37c6c604266e2c1d3aa889 Binary files /dev/null and b/text_to_speech/utils/audio/__pycache__/cwt.cpython-38.pyc differ diff --git a/text_to_speech/utils/audio/__pycache__/io.cpython-38.pyc b/text_to_speech/utils/audio/__pycache__/io.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4459bf7646d066fd5355784cdd238b006c8b8c78 Binary files /dev/null and b/text_to_speech/utils/audio/__pycache__/io.cpython-38.pyc differ diff --git a/text_to_speech/utils/audio/__pycache__/pitch_extractors.cpython-38.pyc b/text_to_speech/utils/audio/__pycache__/pitch_extractors.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1e0225729f1f5bc457ba2cddd061ced42bf44655 Binary files /dev/null and b/text_to_speech/utils/audio/__pycache__/pitch_extractors.cpython-38.pyc differ diff --git a/text_to_speech/utils/audio/__pycache__/rnnoise.cpython-38.pyc b/text_to_speech/utils/audio/__pycache__/rnnoise.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5d1b0fbb11a5dfc06402526858244edf8aa61231 Binary files /dev/null and b/text_to_speech/utils/audio/__pycache__/rnnoise.cpython-38.pyc differ diff --git a/text_to_speech/utils/audio/__pycache__/vad.cpython-38.pyc b/text_to_speech/utils/audio/__pycache__/vad.cpython-38.pyc new file 
mode 100644 index 0000000000000000000000000000000000000000..5419a5021957b3aeaa1d4cdc3e4706c4f66e96ba Binary files /dev/null and b/text_to_speech/utils/audio/__pycache__/vad.cpython-38.pyc differ diff --git a/text_to_speech/utils/audio/align.py b/text_to_speech/utils/audio/align.py new file mode 100644 index 0000000000000000000000000000000000000000..7c6162f29271433538eb92a87a9340d51f4c4a12 --- /dev/null +++ b/text_to_speech/utils/audio/align.py @@ -0,0 +1,115 @@ +import re + +import torch +import numpy as np +from textgrid import TextGrid + +from text_to_speech.utils.text.text_encoder import is_sil_phoneme + + +def get_mel2ph(tg_fn, ph, mel, hop_size, audio_sample_rate, min_sil_duration=0): + ph_list = ph.split(" ") + itvs = TextGrid.fromFile(tg_fn)[1] + itvs_ = [] + for i in range(len(itvs)): + if itvs[i].maxTime - itvs[i].minTime < min_sil_duration and i > 0 and is_sil_phoneme(itvs[i].mark): + itvs_[-1].maxTime = itvs[i].maxTime + else: + itvs_.append(itvs[i]) + itvs.intervals = itvs_ + itv_marks = [itv.mark for itv in itvs] + tg_len = len([x for x in itvs if not is_sil_phoneme(x.mark)]) + ph_len = len([x for x in ph_list if not is_sil_phoneme(x)]) + assert tg_len == ph_len, (tg_len, ph_len, itv_marks, ph_list, tg_fn) + mel2ph = np.zeros([mel.shape[0]], int) + + # to process aishell3_no_tone + # for _ in range(10): + + # if itvs[-2].mark == '': + # start_time = itvs[-2].minTime + # end_time = itvs[-1].maxTime + # mark = itvs[-1].mark + # itvs[-2].maxTime = end_time + # itvs[-2].mark = mark + # itvs_ = [] + # for i in range(len(itvs)-1): + # itvs_.append(itvs[i]) + # itvs.intervals = itvs_ + + # if itvs[-1].mark == '': + # start_time = itvs[-2].minTime + # end_time = itvs[-1].maxTime + # itvs[-2].maxTime = end_time + # itvs_ = [] + # for i in range(len(itvs)-1): + # itvs_.append(itvs[i]) + # itvs.intervals = itvs_ + + + i_itv = 0 + i_ph = 0 + while i_itv < len(itvs): + itv = itvs[i_itv] + ph = ph_list[i_ph] + itv_ph = itv.mark + start_frame = int(itv.minTime * audio_sample_rate / hop_size + 0.5) + end_frame = int(itv.maxTime * audio_sample_rate / hop_size + 0.5) + if is_sil_phoneme(itv_ph) and not is_sil_phoneme(ph): + mel2ph[start_frame:end_frame] = i_ph + i_itv += 1 + elif not is_sil_phoneme(itv_ph) and is_sil_phoneme(ph): + i_ph += 1 + else: + if not ((is_sil_phoneme(itv_ph) and is_sil_phoneme(ph)) \ + or re.sub(r'\d+', '', itv_ph.lower()) == re.sub(r'\d+', '', ph.lower())): + print(f"| WARN: {tg_fn} phs are not same: ", itv_ph, ph, itv_marks, ph_list) + mel2ph[start_frame:end_frame] = i_ph + 1 + i_ph += 1 + i_itv += 1 + mel2ph[-1] = mel2ph[-2] + assert not np.any(mel2ph == 0) + T_t = len(ph_list) + dur = mel2token_to_dur(mel2ph, T_t) + return mel2ph.tolist(), dur.tolist() + + +def split_audio_by_mel2ph(audio, mel2ph, hop_size, audio_num_mel_bins): + if isinstance(audio, torch.Tensor): + audio = audio.numpy() + if isinstance(mel2ph, torch.Tensor): + mel2ph = mel2ph.numpy() + assert len(audio.shape) == 1, len(mel2ph.shape) == 1 + split_locs = [] + for i in range(1, len(mel2ph)): + if mel2ph[i] != mel2ph[i - 1]: + split_loc = i * hop_size + split_locs.append(split_loc) + + new_audio = [] + for i in range(len(split_locs) - 1): + new_audio.append(audio[split_locs[i]:split_locs[i + 1]]) + new_audio.append(np.zeros([0.5 * audio_num_mel_bins])) + return np.concatenate(new_audio) + + +def mel2token_to_dur(mel2token, T_txt=None, max_dur=None): + is_torch = isinstance(mel2token, torch.Tensor) + has_batch_dim = True + if not is_torch: + mel2token = torch.LongTensor(mel2token) + if T_txt is None: + 
T_txt = mel2token.max() + if len(mel2token.shape) == 1: + mel2token = mel2token[None, ...] + has_batch_dim = False + B, _ = mel2token.shape + dur = mel2token.new_zeros(B, T_txt + 1).scatter_add(1, mel2token, torch.ones_like(mel2token)) + dur = dur[:, 1:] + if max_dur is not None: + dur = dur.clamp(max=max_dur) + if not is_torch: + dur = dur.numpy() + if not has_batch_dim: + dur = dur[0] + return dur diff --git a/text_to_speech/utils/audio/cwt.py b/text_to_speech/utils/audio/cwt.py new file mode 100644 index 0000000000000000000000000000000000000000..9d42ffc7b40d200dcfa66a0823163f98173b50e6 --- /dev/null +++ b/text_to_speech/utils/audio/cwt.py @@ -0,0 +1,143 @@ +import numpy as np +from pycwt import wavelet +from scipy.interpolate import interp1d + +dt = 0.005 +dj = 1 + + +def convert_continuos_f0(f0): + '''CONVERT F0 TO CONTINUOUS F0 + Args: + f0 (ndarray): original f0 sequence with the shape (T) + Return: + (ndarray): continuous f0 with the shape (T) + ''' + # get uv information as binary + f0 = np.copy(f0) + uv = (f0 == 0).astype(float) + + # get start and end of f0 + if (f0 == 0).all(): + print("| all of the f0 values are 0.") + return uv, f0 + start_f0 = f0[f0 != 0][0] + end_f0 = f0[f0 != 0][-1] + + # padding start and end of f0 sequence + start_idx = np.where(f0 == start_f0)[0][0] + end_idx = np.where(f0 == end_f0)[0][-1] + f0[:start_idx] = start_f0 + f0[end_idx:] = end_f0 + + # get non-zero frame index + nz_frames = np.where(f0 != 0)[0] + + # perform linear interpolation + f = interp1d(nz_frames, f0[nz_frames]) + cont_f0 = f(np.arange(0, f0.shape[0])) + + return uv, cont_f0 + + +def get_cont_lf0(f0, frame_period=5.0): + uv, cont_f0_lpf = convert_continuos_f0(f0) + # cont_f0_lpf = low_pass_filter(cont_f0_lpf, int(1.0 / (frame_period * 0.001)), cutoff=20) + cont_lf0_lpf = np.log(cont_f0_lpf) + return uv, cont_lf0_lpf + + +def get_lf0_cwt(lf0): + ''' + input: + signal of shape (N) + output: + Wavelet_lf0 of shape(10, N), scales of shape(10) + ''' + mother = wavelet.MexicanHat() + s0 = dt * 2 + J = 9 + + Wavelet_lf0, scales, _, _, _, _ = wavelet.cwt(np.squeeze(lf0), dt, dj, s0, J, mother) + # Wavelet.shape => (J + 1, len(lf0)) + Wavelet_lf0 = np.real(Wavelet_lf0).T + return Wavelet_lf0, scales + + +def norm_scale(Wavelet_lf0): + mean = Wavelet_lf0.mean(0)[None, :] + std = Wavelet_lf0.std(0)[None, :] + Wavelet_lf0_norm = (Wavelet_lf0 - mean) / std + return Wavelet_lf0_norm, mean, std + + +def normalize_cwt_lf0(f0, mean, std): + uv, cont_lf0_lpf = get_cont_lf0(f0) + cont_lf0_norm = (cont_lf0_lpf - mean) / std + Wavelet_lf0, scales = get_lf0_cwt(cont_lf0_norm) + Wavelet_lf0_norm, _, _ = norm_scale(Wavelet_lf0) + + return Wavelet_lf0_norm + + +def get_lf0_cwt_norm(f0s, mean, std): + uvs = list() + cont_lf0_lpfs = list() + cont_lf0_lpf_norms = list() + Wavelet_lf0s = list() + Wavelet_lf0s_norm = list() + scaless = list() + + means = list() + stds = list() + for f0 in f0s: + uv, cont_lf0_lpf = get_cont_lf0(f0) + cont_lf0_lpf_norm = (cont_lf0_lpf - mean) / std + + Wavelet_lf0, scales = get_lf0_cwt(cont_lf0_lpf_norm) # [560,10] + Wavelet_lf0_norm, mean_scale, std_scale = norm_scale(Wavelet_lf0) # [560,10],[1,10],[1,10] + + Wavelet_lf0s_norm.append(Wavelet_lf0_norm) + uvs.append(uv) + cont_lf0_lpfs.append(cont_lf0_lpf) + cont_lf0_lpf_norms.append(cont_lf0_lpf_norm) + Wavelet_lf0s.append(Wavelet_lf0) + scaless.append(scales) + means.append(mean_scale) + stds.append(std_scale) + + return Wavelet_lf0s_norm, scaless, means, stds + + +def inverse_cwt_torch(Wavelet_lf0, scales): + import torch + b = 
((torch.arange(0, len(scales)).float().to(Wavelet_lf0.device)[None, None, :] + 1 + 2.5) ** (-2.5)) + lf0_rec = Wavelet_lf0 * b + lf0_rec_sum = lf0_rec.sum(-1) + lf0_rec_sum = (lf0_rec_sum - lf0_rec_sum.mean(-1, keepdim=True)) / lf0_rec_sum.std(-1, keepdim=True) + return lf0_rec_sum + + +def inverse_cwt(Wavelet_lf0, scales): + # mother = wavelet.MexicanHat() + # lf0_rec_sum = wavelet.icwt(Wavelet_lf0[0].T, scales, dt, dj, mother) + b = ((np.arange(0, len(scales))[None, None, :] + 1 + 2.5) ** (-2.5)) + lf0_rec = Wavelet_lf0 * b + lf0_rec_sum = lf0_rec.sum(-1) + # lf0_rec_sum = lf0_rec_sum[None, ...] + lf0_rec_sum = (lf0_rec_sum - lf0_rec_sum.mean(-1, keepdims=True)) / lf0_rec_sum.std(-1, keepdims=True) + return lf0_rec_sum + + +def cwt2f0(cwt_spec, mean, std, cwt_scales): + assert len(mean.shape) == 1 and len(std.shape) == 1 and len(cwt_spec.shape) == 3 + import torch + if isinstance(cwt_spec, torch.Tensor): + f0 = inverse_cwt_torch(cwt_spec, cwt_scales) + f0 = f0 * std[:, None] + mean[:, None] + f0 = f0.exp() # [B, T] + else: + f0 = inverse_cwt(cwt_spec, cwt_scales) + f0 = f0 * std[:, None] + mean[:, None] + f0 = np.exp(f0) # [B, T] + return f0 diff --git a/text_to_speech/utils/audio/griffin_lim.py b/text_to_speech/utils/audio/griffin_lim.py new file mode 100644 index 0000000000000000000000000000000000000000..960132b6a1b8befaf5d0ca968f9908405323d89f --- /dev/null +++ b/text_to_speech/utils/audio/griffin_lim.py @@ -0,0 +1,85 @@ +import librosa +import numpy as np +import torch +import torch.nn.functional as F + + +def _stft(y, hop_size, win_size, fft_size): + return librosa.stft(y=y, n_fft=fft_size, hop_length=hop_size, win_length=win_size, pad_mode='constant') + + +def _istft(y, hop_size, win_size): + return librosa.istft(y, hop_length=hop_size, win_length=win_size) + + +def griffin_lim(S, hop_size, win_size, fft_size, angles=None, n_iters=30): + angles = np.exp(2j * np.pi * np.random.rand(*S.shape)) if angles is None else angles + S_complex = np.abs(S).astype(np.complex) + y = _istft(S_complex * angles, hop_size, win_size) + for i in range(n_iters): + angles = np.exp(1j * np.angle(_stft(y, hop_size, win_size, fft_size))) + y = _istft(S_complex * angles, hop_size, win_size) + return y + + +def istft(amp, ang, hop_size, win_size, fft_size, pad=False, window=None): + spec = amp * torch.exp(1j * ang) + spec_r = spec.real + spec_i = spec.imag + spec = torch.stack([spec_r, spec_i], -1) + if window is None: + window = torch.hann_window(win_size).to(amp.device) + if pad: + spec = F.pad(spec, [0, 0, 0, 1], mode='reflect') + wav = torch.istft(spec, fft_size, hop_size, win_size) + return wav + + +def griffin_lim_torch(S, hop_size, win_size, fft_size, angles=None, n_iters=30): + """ + + Examples: + >>> x_stft = librosa.stft(wav, n_fft=fft_size, hop_length=hop_size, win_length=win_length, pad_mode="constant") + >>> x_stft = x_stft[None, ...] 
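+    >>> # x_stft now has shape [1, fft_size // 2 + 1, T] with complex dtype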
+ >>> amp = np.abs(x_stft) + >>> angle_init = np.exp(2j * np.pi * np.random.rand(*x_stft.shape)) + >>> amp = torch.FloatTensor(amp) + >>> wav = griffin_lim_torch(amp, angle_init, hparams) + + :param amp: [B, n_fft, T] + :param ang: [B, n_fft, T] + :return: [B, T_wav] + """ + angles = torch.exp(2j * np.pi * torch.rand(*S.shape)) if angles is None else angles + window = torch.hann_window(win_size).to(S.device) + y = istft(S, angles, hop_size, win_size, fft_size, window=window) + for i in range(n_iters): + x_stft = torch.stft(y, fft_size, hop_size, win_size, window) + x_stft = x_stft[..., 0] + 1j * x_stft[..., 1] + angles = torch.angle(x_stft) + y = istft(S, angles, hop_size, win_size, fft_size, window=window) + return y + + +# Conversions +_mel_basis = None +_inv_mel_basis = None + + +def _build_mel_basis(audio_sample_rate, fft_size, audio_num_mel_bins, fmin, fmax): + assert fmax <= audio_sample_rate // 2 + return librosa.filters.mel(audio_sample_rate, fft_size, n_mels=audio_num_mel_bins, fmin=fmin, fmax=fmax) + + +def _linear_to_mel(spectogram, audio_sample_rate, fft_size, audio_num_mel_bins, fmin, fmax): + global _mel_basis + if _mel_basis is None: + _mel_basis = _build_mel_basis(audio_sample_rate, fft_size, audio_num_mel_bins, fmin, fmax) + return np.dot(_mel_basis, spectogram) + + +def _mel_to_linear(mel_spectrogram, audio_sample_rate, fft_size, audio_num_mel_bins, fmin, fmax): + global _inv_mel_basis + if _inv_mel_basis is None: + _inv_mel_basis = np.linalg.pinv(_build_mel_basis(audio_sample_rate, fft_size, audio_num_mel_bins, fmin, fmax)) + return np.maximum(1e-10, np.dot(_inv_mel_basis, mel_spectrogram)) diff --git a/text_to_speech/utils/audio/io.py b/text_to_speech/utils/audio/io.py new file mode 100644 index 0000000000000000000000000000000000000000..34d5d20ae13e9aa481b1bc85117ad6539af8a624 --- /dev/null +++ b/text_to_speech/utils/audio/io.py @@ -0,0 +1,22 @@ +import subprocess + +import numpy as np +from scipy.io import wavfile + + +def save_wav(wav, path, sr, norm=False): + if norm: + wav = wav / np.abs(wav).max() + wav = wav * 32767 + wavfile.write(path[:-4] + '.wav', sr, wav.astype(np.int16)) + if path[-4:] == '.mp3': + to_mp3(path[:-4]) + + +def to_mp3(out_path): + if out_path[-4:] == '.wav': + out_path = out_path[:-4] + subprocess.check_call( + f'ffmpeg -threads 1 -loglevel error -i "{out_path}.wav" -vn -b:a 192k -y -hide_banner -async 1 "{out_path}.mp3"', + shell=True, stdin=subprocess.PIPE) + subprocess.check_call(f'rm -f "{out_path}.wav"', shell=True) diff --git a/text_to_speech/utils/audio/pitch/__pycache__/utils.cpython-38.pyc b/text_to_speech/utils/audio/pitch/__pycache__/utils.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2d33357084d6fda6d82a2190546c99e0dd05ff65 Binary files /dev/null and b/text_to_speech/utils/audio/pitch/__pycache__/utils.cpython-38.pyc differ diff --git a/text_to_speech/utils/audio/pitch/utils.py b/text_to_speech/utils/audio/pitch/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..238b8022185753a7d4d9d674d189a99050c29b6f --- /dev/null +++ b/text_to_speech/utils/audio/pitch/utils.py @@ -0,0 +1,82 @@ +import numpy as np +import torch + + +def to_lf0(f0): + f0[f0 < 1.0e-5] = 1.0e-6 + lf0 = f0.log() if isinstance(f0, torch.Tensor) else np.log(f0) + lf0[f0 < 1.0e-5] = - 1.0E+10 + return lf0 + + +def to_f0(lf0): + f0 = np.where(lf0 <= 0, 0.0, np.exp(lf0)) + return f0.flatten() + + +def f0_to_coarse(f0, f0_bin=256, f0_max=900.0, f0_min=50.0): + f0_mel_min = 1127 * np.log(1 + f0_min / 700) + 
f0_mel_max = 1127 * np.log(1 + f0_max / 700) + is_torch = isinstance(f0, torch.Tensor) + f0_mel = 1127 * (1 + f0 / 700).log() if is_torch else 1127 * np.log(1 + f0 / 700) + f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * (f0_bin - 2) / (f0_mel_max - f0_mel_min) + 1 + + f0_mel[f0_mel <= 1] = 1 + f0_mel[f0_mel > f0_bin - 1] = f0_bin - 1 + f0_coarse = (f0_mel + 0.5).long() if is_torch else np.rint(f0_mel).astype(int) + assert f0_coarse.max() <= 255 and f0_coarse.min() >= 1, (f0_coarse.max(), f0_coarse.min(), f0.min(), f0.max()) + return f0_coarse + + +def coarse_to_f0(f0_coarse, f0_bin=256, f0_max=900.0, f0_min=50.0): + f0_mel_min = 1127 * np.log(1 + f0_min / 700) + f0_mel_max = 1127 * np.log(1 + f0_max / 700) + uv = f0_coarse == 1 + f0 = f0_mel_min + (f0_coarse - 1) * (f0_mel_max - f0_mel_min) / (f0_bin - 2) + f0 = ((f0 / 1127).exp() - 1) * 700 + f0[uv] = 0 + return f0 + + +def norm_f0(f0, uv, pitch_norm='log', f0_mean=400, f0_std=100): + is_torch = isinstance(f0, torch.Tensor) + if pitch_norm == 'standard': + f0 = (f0 - f0_mean) / f0_std + if pitch_norm == 'log': + f0 = torch.log2(f0 + 1e-8) if is_torch else np.log2(f0 + 1e-8) + if uv is not None: + f0[uv > 0] = 0 + return f0 + + +def norm_interp_f0(f0, pitch_norm='log', f0_mean=None, f0_std=None): + is_torch = isinstance(f0, torch.Tensor) + if is_torch: + device = f0.device + f0 = f0.data.cpu().numpy() + uv = f0 == 0 + f0 = norm_f0(f0, uv, pitch_norm, f0_mean, f0_std) + if sum(uv) == len(f0): + f0[uv] = 0 + elif sum(uv) > 0: + f0[uv] = np.interp(np.where(uv)[0], np.where(~uv)[0], f0[~uv]) + if is_torch: + uv = torch.FloatTensor(uv) + f0 = torch.FloatTensor(f0) + f0 = f0.to(device) + uv = uv.to(device) + return f0, uv + + +def denorm_f0(f0, uv, pitch_norm='log', f0_mean=400, f0_std=100, pitch_padding=None, min=50, max=900): + is_torch = isinstance(f0, torch.Tensor) + if pitch_norm == 'standard': + f0 = f0 * f0_std + f0_mean + if pitch_norm == 'log': + f0 = 2 ** f0 + f0 = f0.clamp(min=min, max=max) if is_torch else np.clip(f0, a_min=min, a_max=max) + if uv is not None: + f0[uv > 0] = 0 + if pitch_padding is not None: + f0[pitch_padding] = 0 + return f0 diff --git a/text_to_speech/utils/audio/pitch_extractors.py b/text_to_speech/utils/audio/pitch_extractors.py new file mode 100644 index 0000000000000000000000000000000000000000..6b0e9e1300344e9c2e680e21dca79c20457df9d7 --- /dev/null +++ b/text_to_speech/utils/audio/pitch_extractors.py @@ -0,0 +1,85 @@ +import numpy as np +from text_to_speech.utils.audio.pitch.utils import denorm_f0, norm_f0, f0_to_coarse +import parselmouth + +PITCH_EXTRACTOR = {} + + +def register_pitch_extractor(name): + def register_pitch_extractor_(cls): + PITCH_EXTRACTOR[name] = cls + return cls + + return register_pitch_extractor_ + + +def get_pitch_extractor(name): + return PITCH_EXTRACTOR[name] + + +def extract_pitch_simple(wav): + from text_to_speech.utils.commons.hparams import hparams + return extract_pitch(hparams['pitch_extractor'], wav, + hparams['hop_size'], hparams['audio_sample_rate'], + f0_min=hparams['f0_min'], f0_max=hparams['f0_max']) + + +def extract_pitch(extractor_name, wav_data, hop_size, audio_sample_rate, f0_min=75, f0_max=800, **kwargs): + return get_pitch_extractor(extractor_name)(wav_data, hop_size, audio_sample_rate, f0_min, f0_max, **kwargs) + + +@register_pitch_extractor('parselmouth') +def parselmouth_pitch(wav_data, hop_size, audio_sample_rate, f0_min, f0_max, + voicing_threshold=0.6, *args, **kwargs): + import parselmouth + time_step = hop_size / audio_sample_rate * 1000 + 
n_mel_frames = int(len(wav_data) // hop_size)
+    f0_pm = parselmouth.Sound(wav_data, audio_sample_rate).to_pitch_ac(
+        time_step=time_step / 1000, voicing_threshold=voicing_threshold,
+        pitch_floor=f0_min, pitch_ceiling=f0_max).selected_array['frequency']
+    pad_size = (n_mel_frames - len(f0_pm) + 1) // 2
+    f0 = np.pad(f0_pm, [[pad_size, n_mel_frames - len(f0_pm) - pad_size]], mode='constant')
+    return f0
+
+
+def get_pitch(wav_data, mel, hparams):
+    """
+    :param wav_data: [T]
+    :param mel: [T, 80]
+    :param hparams:
+    :return:
+    """
+    time_step = hparams['hop_size'] / hparams['audio_sample_rate'] * 1000
+    f0_min = 80
+    f0_max = 750
+
+    if hparams['pitch_extractor'] == 'harvest':
+        import pyworld as pw
+        f0, t = pw.harvest(wav_data.astype(np.double), hparams['audio_sample_rate'],
+                           frame_period=hparams['hop_size'] / hparams['audio_sample_rate'] * 1000)
+    if hparams['pitch_extractor'] == 'dio':
+        import pyworld as pw
+        _f0, t = pw.dio(wav_data.astype(np.double), hparams['audio_sample_rate'],
+                        frame_period=hparams['hop_size'] / hparams['audio_sample_rate'] * 1000)
+        f0 = pw.stonemask(wav_data.astype(np.double), _f0, t, hparams['audio_sample_rate'])  # pitch refinement
+    elif hparams['pitch_extractor'] == 'parselmouth':
+        if hparams['hop_size'] == 128:
+            pad_size = 4
+        elif hparams['hop_size'] == 256:
+            pad_size = 2
+        else:
+            assert False
+        f0 = parselmouth.Sound(wav_data, hparams['audio_sample_rate']).to_pitch_ac(
+            time_step=time_step / 1000, voicing_threshold=0.6,
+            pitch_floor=f0_min, pitch_ceiling=f0_max).selected_array['frequency']
+        lpad = pad_size * 2
+        rpad = len(mel) - len(f0) - lpad
+        f0 = np.pad(f0, [[lpad, rpad]], mode='constant')
+
+    # mel and f0 are extracted by two different libraries, so their lengths must be reconciled
+    delta_l = len(mel) - len(f0)
+    assert np.abs(delta_l) <= 8
+    if delta_l > 0:
+        f0 = np.concatenate([f0, [f0[-1]] * delta_l], 0)
+    f0 = f0[:len(mel)]
+    pitch_coarse = f0_to_coarse(f0)
+    return f0, pitch_coarse
\ No newline at end of file
diff --git a/text_to_speech/utils/audio/rnnoise.py b/text_to_speech/utils/audio/rnnoise.py
new file mode 100644
index 0000000000000000000000000000000000000000..47f4eb6471918ca8144f217580a71d1720cd8c36
--- /dev/null
+++ b/text_to_speech/utils/audio/rnnoise.py
@@ -0,0 +1,48 @@
+# rnnoise.py, requirements: ffmpeg, sox, rnnoise, python
+import os
+import subprocess
+
+INSTALL_STR = """
+RNNoise library not found. Please install RNNoise (https://github.com/xiph/rnnoise) to $REPO/rnnoise:
+sudo apt-get install -y autoconf automake libtool ffmpeg sox
+git clone https://github.com/xiph/rnnoise.git
+rm -rf rnnoise/.git
+cd rnnoise
+./autogen.sh && ./configure && make
+cd ..
+""" + + +def rnnoise(filename, out_fn=None, verbose=False, out_sample_rate=22050): + assert os.path.exists('./rnnoise/examples/rnnoise_demo'), INSTALL_STR + if out_fn is None: + out_fn = f"{filename[:-4]}.denoised.wav" + out_48k_fn = f"{out_fn}.48000.wav" + tmp0_fn = f"{out_fn}.0.wav" + tmp1_fn = f"{out_fn}.1.wav" + tmp2_fn = f"{out_fn}.2.raw" + tmp3_fn = f"{out_fn}.3.raw" + if verbose: + print("Pre-processing audio...") # wav to pcm raw + subprocess.check_call( + f'sox "{filename}" -G -r48000 "{tmp0_fn}"', shell=True, stdin=subprocess.PIPE) # convert to raw + subprocess.check_call( + f'sox -v 0.95 "{tmp0_fn}" "{tmp1_fn}"', shell=True, stdin=subprocess.PIPE) # convert to raw + subprocess.check_call( + f'ffmpeg -y -i "{tmp1_fn}" -loglevel quiet -f s16le -ac 1 -ar 48000 "{tmp2_fn}"', + shell=True, stdin=subprocess.PIPE) # convert to raw + if verbose: + print("Applying rnnoise algorithm to audio...") # rnnoise + subprocess.check_call( + f'./rnnoise/examples/rnnoise_demo "{tmp2_fn}" "{tmp3_fn}"', shell=True) + + if verbose: + print("Post-processing audio...") # pcm raw to wav + if filename == out_fn: + subprocess.check_call(f'rm -f "{out_fn}"', shell=True) + subprocess.check_call( + f'sox -t raw -r 48000 -b 16 -e signed-integer -c 1 "{tmp3_fn}" "{out_48k_fn}"', shell=True) + subprocess.check_call(f'sox "{out_48k_fn}" -G -r{out_sample_rate} "{out_fn}"', shell=True) + subprocess.check_call(f'rm -f "{tmp0_fn}" "{tmp1_fn}" "{tmp2_fn}" "{tmp3_fn}" "{out_48k_fn}"', shell=True) + if verbose: + print("Audio-filtering completed!") diff --git a/text_to_speech/utils/audio/vad.py b/text_to_speech/utils/audio/vad.py new file mode 100644 index 0000000000000000000000000000000000000000..cbe9c7a6417f234ae46e1754d6736b26e22b2427 --- /dev/null +++ b/text_to_speech/utils/audio/vad.py @@ -0,0 +1,78 @@ +from skimage.transform import resize +import struct +import webrtcvad +from scipy.ndimage.morphology import binary_dilation +import librosa +import numpy as np +import pyloudnorm as pyln +import warnings + +warnings.filterwarnings("ignore", message="Possible clipped samples in output") + +int16_max = (2 ** 15) - 1 + + +def trim_long_silences(path, sr=None, return_raw_wav=False, norm=True, vad_max_silence_length=12): + """ + Ensures that segments without voice in the waveform remain no longer than a + threshold determined by the VAD parameters in params.py. + :param wav: the raw waveform as a numpy array of floats + :param vad_max_silence_length: Maximum number of consecutive silent frames a segment can have. + :return: the same waveform with silences trimmed away (length <= original wav length) + """ + + ## Voice Activation Detection + # Window size of the VAD. Must be either 10, 20 or 30 milliseconds. + # This sets the granularity of the VAD. Should not need to be changed. + sampling_rate = 16000 + wav_raw, sr = librosa.core.load(path, sr=sr) + + if norm: + meter = pyln.Meter(sr) # create BS.1770 meter + loudness = meter.integrated_loudness(wav_raw) + wav_raw = pyln.normalize.loudness(wav_raw, loudness, -20.0) + if np.abs(wav_raw).max() > 1.0: + wav_raw = wav_raw / np.abs(wav_raw).max() + + wav = librosa.resample(wav_raw, sr, sampling_rate, res_type='kaiser_best') + + vad_window_length = 30 # In milliseconds + # Number of frames to average together when performing the moving average smoothing. + # The larger this value, the larger the VAD variations must be to not get smoothed out. 
+ vad_moving_average_width = 8 + + # Compute the voice detection window size + samples_per_window = (vad_window_length * sampling_rate) // 1000 + + # Trim the end of the audio to have a multiple of the window size + wav = wav[:len(wav) - (len(wav) % samples_per_window)] + + # Convert the float waveform to 16-bit mono PCM + pcm_wave = struct.pack("%dh" % len(wav), *(np.round(wav * int16_max)).astype(np.int16)) + + # Perform voice activation detection + voice_flags = [] + vad = webrtcvad.Vad(mode=3) + for window_start in range(0, len(wav), samples_per_window): + window_end = window_start + samples_per_window + voice_flags.append(vad.is_speech(pcm_wave[window_start * 2:window_end * 2], + sample_rate=sampling_rate)) + voice_flags = np.array(voice_flags) + + # Smooth the voice detection with a moving average + def moving_average(array, width): + array_padded = np.concatenate((np.zeros((width - 1) // 2), array, np.zeros(width // 2))) + ret = np.cumsum(array_padded, dtype=float) + ret[width:] = ret[width:] - ret[:-width] + return ret[width - 1:] / width + + audio_mask = moving_average(voice_flags, vad_moving_average_width) + audio_mask = np.round(audio_mask).astype(np.bool) + + # Dilate the voiced regions + audio_mask = binary_dilation(audio_mask, np.ones(vad_max_silence_length + 1)) + audio_mask = np.repeat(audio_mask, samples_per_window) + audio_mask = resize(audio_mask, (len(wav_raw),)) > 0 + if return_raw_wav: + return wav_raw, audio_mask, sr + return wav_raw[audio_mask], audio_mask, sr diff --git a/text_to_speech/utils/commons/__pycache__/ckpt_utils.cpython-38.pyc b/text_to_speech/utils/commons/__pycache__/ckpt_utils.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..18b0f527b672fae5983867dbaa8345b6dae37ce8 Binary files /dev/null and b/text_to_speech/utils/commons/__pycache__/ckpt_utils.cpython-38.pyc differ diff --git a/text_to_speech/utils/commons/__pycache__/hparams.cpython-38.pyc b/text_to_speech/utils/commons/__pycache__/hparams.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fcf696e17f9dba9706a8297d3917510d32e00d14 Binary files /dev/null and b/text_to_speech/utils/commons/__pycache__/hparams.cpython-38.pyc differ diff --git a/text_to_speech/utils/commons/__pycache__/indexed_datasets.cpython-38.pyc b/text_to_speech/utils/commons/__pycache__/indexed_datasets.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..abd920b356f7e9cd390a9ee974eaa9c0299e690c Binary files /dev/null and b/text_to_speech/utils/commons/__pycache__/indexed_datasets.cpython-38.pyc differ diff --git a/text_to_speech/utils/commons/__pycache__/meters.cpython-38.pyc b/text_to_speech/utils/commons/__pycache__/meters.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eebb39d0783bf19e78bcc2313104c65e9f68d6d2 Binary files /dev/null and b/text_to_speech/utils/commons/__pycache__/meters.cpython-38.pyc differ diff --git a/text_to_speech/utils/commons/__pycache__/multiprocess_utils.cpython-38.pyc b/text_to_speech/utils/commons/__pycache__/multiprocess_utils.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fade8c56254518fd4ba06971c910cf2ea84be37d Binary files /dev/null and b/text_to_speech/utils/commons/__pycache__/multiprocess_utils.cpython-38.pyc differ diff --git a/text_to_speech/utils/commons/base_task.py b/text_to_speech/utils/commons/base_task.py new file mode 100644 index 0000000000000000000000000000000000000000..3f56bde37c043b2416d8e65f9b850826ee26fad9 --- 
/dev/null +++ b/text_to_speech/utils/commons/base_task.py @@ -0,0 +1,238 @@ +import logging +import os +import random +import subprocess +import sys +from datetime import datetime +import numpy as np +import torch.utils.data +from torch import nn +from torch.utils.tensorboard import SummaryWriter +from text_to_speech.utils.commons.dataset_utils import data_loader +from text_to_speech.utils.commons.hparams import hparams +from text_to_speech.utils.commons.meters import AvgrageMeter +from text_to_speech.utils.commons.tensor_utils import tensors_to_scalars +from text_to_speech.utils.commons.trainer import Trainer +from text_to_speech.utils.nn.model_utils import get_grad_norm + +torch.multiprocessing.set_sharing_strategy(os.getenv('TORCH_SHARE_STRATEGY', 'file_system')) + +log_format = '%(asctime)s %(message)s' +logging.basicConfig(stream=sys.stdout, level=logging.INFO, + format=log_format, datefmt='%m/%d %I:%M:%S %p') + + +class BaseTask(nn.Module): + def __init__(self, *args, **kwargs): + super(BaseTask, self).__init__() + self.current_epoch = 0 + self.global_step = 0 + self.trainer = None + self.use_ddp = False + self.gradient_clip_norm = hparams['clip_grad_norm'] + self.gradient_clip_val = hparams.get('clip_grad_value', 0) + self.model = None + self.training_losses_meter = None + self.logger: SummaryWriter = None + + ###################### + # build model, dataloaders, optimizer, scheduler and tensorboard + ###################### + def build_model(self): + raise NotImplementedError + + @data_loader + def train_dataloader(self): + raise NotImplementedError + + @data_loader + def test_dataloader(self): + raise NotImplementedError + + @data_loader + def val_dataloader(self): + raise NotImplementedError + + def build_scheduler(self, optimizer): + return None + + def build_optimizer(self, model): + raise NotImplementedError + + def configure_optimizers(self): + optm = self.build_optimizer(self.model) + self.scheduler = self.build_scheduler(optm) + if isinstance(optm, (list, tuple)): + return optm + return [optm] + + def build_tensorboard(self, save_dir, name, **kwargs): + log_dir = os.path.join(save_dir, name) + os.makedirs(log_dir, exist_ok=True) + self.logger = SummaryWriter(log_dir=log_dir, **kwargs) + + ###################### + # training + ###################### + def on_train_start(self): + pass + + def on_train_end(self): + pass + + def on_epoch_start(self): + self.training_losses_meter = {'total_loss': AvgrageMeter()} + + def on_epoch_end(self): + loss_outputs = {k: round(v.avg, 4) for k, v in self.training_losses_meter.items()} + print(f"Epoch {self.current_epoch} ended. Steps: {self.global_step}. 
{loss_outputs}") + + def _training_step(self, sample, batch_idx, optimizer_idx): + """ + + :param sample: + :param batch_idx: + :return: total loss: torch.Tensor, loss_log: dict + """ + raise NotImplementedError + + def training_step(self, sample, batch_idx, optimizer_idx=-1): + """ + + :param sample: + :param batch_idx: + :param optimizer_idx: + :return: {'loss': torch.Tensor, 'progress_bar': dict, 'tb_log': dict} + """ + loss_ret = self._training_step(sample, batch_idx, optimizer_idx) + if loss_ret is None: + return {'loss': None} + total_loss, log_outputs = loss_ret + log_outputs = tensors_to_scalars(log_outputs) + for k, v in log_outputs.items(): + if k not in self.training_losses_meter: + self.training_losses_meter[k] = AvgrageMeter() + if not np.isnan(v): + self.training_losses_meter[k].update(v) + self.training_losses_meter['total_loss'].update(total_loss.item()) + + if optimizer_idx >= 0: + log_outputs[f'lr_{optimizer_idx}'] = self.trainer.optimizers[optimizer_idx].param_groups[0]['lr'] + + progress_bar_log = log_outputs + tb_log = {f'tr/{k}': v for k, v in log_outputs.items()} + return { + 'loss': total_loss, + 'progress_bar': progress_bar_log, + 'tb_log': tb_log + } + + def on_before_optimization(self, opt_idx): + if self.gradient_clip_norm > 0: + prefix = f"grad_norm_opt_idx_{opt_idx}" + grad_norm = torch.nn.utils.clip_grad_norm_(self.parameters(), self.gradient_clip_norm) + grad_norm_dict = { + f"{prefix}/task.parameters": grad_norm + } + return grad_norm_dict + if self.gradient_clip_val > 0: + torch.nn.utils.clip_grad_value_(self.parameters(), self.gradient_clip_val) + + def on_after_optimization(self, epoch, batch_idx, optimizer, optimizer_idx): + if self.scheduler is not None: + self.scheduler.step(self.global_step // hparams['accumulate_grad_batches']) + + ###################### + # validation + ###################### + def validation_start(self): + pass + + def validation_step(self, sample, batch_idx): + """ + + :param sample: + :param batch_idx: + :return: output: {"losses": {...}, "total_loss": float, ...} or (total loss: torch.Tensor, loss_log: dict) + """ + raise NotImplementedError + + def validation_end(self, outputs): + """ + + :param outputs: + :return: loss_output: dict + """ + all_losses_meter = {'total_loss': AvgrageMeter()} + for output in outputs: + if len(output) == 0 or output is None: + continue + if isinstance(output, dict): + assert 'losses' in output, 'Key "losses" should exist in validation output.' 
+ n = output.pop('nsamples', 1) + losses = tensors_to_scalars(output['losses']) + total_loss = output.get('total_loss', sum(losses.values())) + else: + assert len(output) == 2, 'Validation output should only consist of two elements: (total_loss, losses)' + n = 1 + total_loss, losses = output + losses = tensors_to_scalars(losses) + if isinstance(total_loss, torch.Tensor): + total_loss = total_loss.item() + for k, v in losses.items(): + if k not in all_losses_meter: + all_losses_meter[k] = AvgrageMeter() + all_losses_meter[k].update(v, n) + all_losses_meter['total_loss'].update(total_loss, n) + loss_output = {k: round(v.avg, 4) for k, v in all_losses_meter.items()} + print(f"| Validation results@{self.global_step}: {loss_output}") + return { + 'tb_log': {f'val/{k}': v for k, v in loss_output.items()}, + 'val_loss': loss_output['total_loss'] + } + + ###################### + # testing + ###################### + def test_start(self): + pass + + def test_step(self, sample, batch_idx): + return self.validation_step(sample, batch_idx) + + def test_end(self, outputs): + return self.validation_end(outputs) + + ###################### + # start training/testing + ###################### + @classmethod + def start(cls): + os.environ['MASTER_PORT'] = str(random.randint(15000, 30000)) + random.seed(hparams['seed']) + np.random.seed(hparams['seed']) + work_dir = hparams['work_dir'] + trainer = Trainer( + work_dir=work_dir, + val_check_interval=hparams['val_check_interval'], + tb_log_interval=hparams['tb_log_interval'], + max_updates=hparams['max_updates'], + num_sanity_val_steps=hparams['num_sanity_val_steps'] if not hparams['validate'] else 10000, + accumulate_grad_batches=hparams['accumulate_grad_batches'], + print_nan_grads=hparams['print_nan_grads'], + resume_from_checkpoint=hparams.get('resume_from_checkpoint', 0), + amp=hparams['amp'], + monitor_key=hparams['valid_monitor_key'], + monitor_mode=hparams['valid_monitor_mode'], + num_ckpt_keep=hparams['num_ckpt_keep'], + save_best=hparams['save_best'], + seed=hparams['seed'], + debug=hparams['debug'] + ) + if not hparams['infer']: # train + trainer.fit(cls) + else: + trainer.test(cls) + + def on_keyboard_interrupt(self): + pass diff --git a/text_to_speech/utils/commons/ckpt_utils.py b/text_to_speech/utils/commons/ckpt_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..9c1006d5852c6cf57063ce64e773d3c40ae9500d --- /dev/null +++ b/text_to_speech/utils/commons/ckpt_utils.py @@ -0,0 +1,66 @@ +import glob +import os +import re +import torch + + +def get_last_checkpoint(work_dir, steps=None): + checkpoint = None + last_ckpt_path = None + ckpt_paths = get_all_ckpts(work_dir, steps) + if len(ckpt_paths) > 0: + last_ckpt_path = ckpt_paths[0] + checkpoint = torch.load(last_ckpt_path, map_location='cpu') + return checkpoint, last_ckpt_path + + +def get_all_ckpts(work_dir, steps=None): + if steps is None: + ckpt_path_pattern = f'{work_dir}/model_ckpt_steps_*.ckpt' + else: + ckpt_path_pattern = f'{work_dir}/model_ckpt_steps_{steps}.ckpt' + return sorted(glob.glob(ckpt_path_pattern), + key=lambda x: -int(re.findall('.*steps\_(\d+)\.ckpt', x)[0])) + + +def load_ckpt(cur_model, ckpt_base_dir, model_name='model', force=True, strict=True): + if os.path.isfile(ckpt_base_dir): + base_dir = os.path.dirname(ckpt_base_dir) + ckpt_path = ckpt_base_dir + checkpoint = torch.load(ckpt_base_dir, map_location='cpu') + else: + base_dir = ckpt_base_dir + checkpoint, ckpt_path = get_last_checkpoint(ckpt_base_dir) + if checkpoint is not None: + state_dict = 
checkpoint["state_dict"] + if len([k for k in state_dict.keys() if '.' in k]) > 0: + state_dict = {k[len(model_name) + 1:]: v for k, v in state_dict.items() + if k.startswith(f'{model_name}.')} + else: + if '.' not in model_name: + state_dict = state_dict[model_name] + else: + base_model_name = model_name.split('.')[0] + rest_model_name = model_name[len(base_model_name) + 1:] + state_dict = { + k[len(rest_model_name) + 1:]: v for k, v in state_dict[base_model_name].items() + if k.startswith(f'{rest_model_name}.')} + if not strict: + cur_model_state_dict = cur_model.state_dict() + unmatched_keys = [] + for key, param in state_dict.items(): + if key in cur_model_state_dict: + new_param = cur_model_state_dict[key] + if new_param.shape != param.shape: + unmatched_keys.append(key) + print("| Unmatched keys: ", key, new_param.shape, param.shape) + for key in unmatched_keys: + del state_dict[key] + cur_model.load_state_dict(state_dict, strict=strict) + print(f"| load '{model_name}' from '{ckpt_path}'.") + else: + e_msg = f"| ckpt not found in {base_dir}." + if force: + assert False, e_msg + else: + print(e_msg) diff --git a/text_to_speech/utils/commons/dataset_utils.py b/text_to_speech/utils/commons/dataset_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..e98d40d96f3a125837d2d7f2b2e42c05afe69978 --- /dev/null +++ b/text_to_speech/utils/commons/dataset_utils.py @@ -0,0 +1,247 @@ +import os +import sys +import traceback +import types +from functools import wraps +from itertools import chain +import numpy as np +import torch.utils.data +from torch.utils.data import ConcatDataset +from text_to_speech.utils.commons.hparams import hparams + + +def collate_1d_or_2d(values, pad_idx=0, left_pad=False, shift_right=False, max_len=None, shift_id=1): + if len(values[0].shape) == 1: + return collate_1d(values, pad_idx, left_pad, shift_right, max_len, shift_id) + else: + return collate_2d(values, pad_idx, left_pad, shift_right, max_len) + + +def collate_1d(values, pad_idx=0, left_pad=False, shift_right=False, max_len=None, shift_id=1): + """Convert a list of 1d tensors into a padded 2d tensor.""" + size = max(v.size(0) for v in values) if max_len is None else max_len + res = values[0].new(len(values), size).fill_(pad_idx) + + def copy_tensor(src, dst): + assert dst.numel() == src.numel() + if shift_right: + dst[1:] = src[:-1] + dst[0] = shift_id + else: + dst.copy_(src) + + for i, v in enumerate(values): + copy_tensor(v, res[i][size - len(v):] if left_pad else res[i][:len(v)]) + return res + + +def collate_2d(values, pad_idx=0, left_pad=False, shift_right=False, max_len=None): + """Convert a list of 2d tensors into a padded 3d tensor.""" + size = max(v.size(0) for v in values) if max_len is None else max_len + res = values[0].new(len(values), size, values[0].shape[1]).fill_(pad_idx) + + def copy_tensor(src, dst): + assert dst.numel() == src.numel() + if shift_right: + dst[1:] = src[:-1] + else: + dst.copy_(src) + + for i, v in enumerate(values): + copy_tensor(v, res[i][size - len(v):] if left_pad else res[i][:len(v)]) + return res + + +def _is_batch_full(batch, num_tokens, max_tokens, max_sentences): + if len(batch) == 0: + return 0 + if len(batch) == max_sentences: + return 1 + if num_tokens > max_tokens: + return 1 + return 0 + + +def batch_by_size( + indices, num_tokens_fn, max_tokens=None, max_sentences=None, + required_batch_size_multiple=1, distributed=False +): + """ + Yield mini-batches of indices bucketed by size. Batches may contain + sequences of different lengths. 
+ + Args: + indices (List[int]): ordered list of dataset indices + num_tokens_fn (callable): function that returns the number of tokens at + a given index + max_tokens (int, optional): max number of tokens in each batch + (default: None). + max_sentences (int, optional): max number of sentences in each + batch (default: None). + required_batch_size_multiple (int, optional): require batch size to + be a multiple of N (default: 1). + """ + max_tokens = max_tokens if max_tokens is not None else sys.maxsize + max_sentences = max_sentences if max_sentences is not None else sys.maxsize + bsz_mult = required_batch_size_multiple + + if isinstance(indices, types.GeneratorType): + indices = np.fromiter(indices, dtype=np.int64, count=-1) + + sample_len = 0 + sample_lens = [] + batch = [] + batches = [] + for i in range(len(indices)): + idx = indices[i] + num_tokens = num_tokens_fn(idx) + sample_lens.append(num_tokens) + sample_len = max(sample_len, num_tokens) + + assert sample_len <= max_tokens, ( + "sentence at index {} of size {} exceeds max_tokens " + "limit of {}!".format(idx, sample_len, max_tokens) + ) + num_tokens = (len(batch) + 1) * sample_len + + if _is_batch_full(batch, num_tokens, max_tokens, max_sentences): + mod_len = max( + bsz_mult * (len(batch) // bsz_mult), + len(batch) % bsz_mult, + ) + batches.append(batch[:mod_len]) + batch = batch[mod_len:] + sample_lens = sample_lens[mod_len:] + sample_len = max(sample_lens) if len(sample_lens) > 0 else 0 + batch.append(idx) + if len(batch) > 0: + batches.append(batch) + return batches + + +def unpack_dict_to_list(samples): + samples_ = [] + bsz = samples.get('outputs').size(0) + for i in range(bsz): + res = {} + for k, v in samples.items(): + try: + res[k] = v[i] + except: + pass + samples_.append(res) + return samples_ + + +def remove_padding(x, padding_idx=0): + if x is None: + return None + assert len(x.shape) in [1, 2] + if len(x.shape) == 2: # [T, H] + return x[np.abs(x).sum(-1) != padding_idx] + elif len(x.shape) == 1: # [T] + return x[x != padding_idx] + + +def data_loader(fn): + """ + Decorator to make any fx with this use the lazy property + :param fn: + :return: + """ + + wraps(fn) + attr_name = '_lazy_' + fn.__name__ + + def _get_data_loader(self): + try: + value = getattr(self, attr_name) + except AttributeError: + try: + value = fn(self) # Lazy evaluation, done only once. + except AttributeError as e: + # Guard against AttributeError suppression. (Issue #142) + traceback.print_exc() + error = f'{fn.__name__}: An AttributeError was encountered: ' + str(e) + raise RuntimeError(error) from e + setattr(self, attr_name, value) # Memoize evaluation. + return value + + return _get_data_loader + + +class BaseDataset(torch.utils.data.Dataset): + def __init__(self, shuffle): + super().__init__() + self.hparams = hparams + self.shuffle = shuffle + self.sort_by_len = hparams['sort_by_len'] + self.sizes = None + + @property + def _sizes(self): + return self.sizes + + def __getitem__(self, index): + raise NotImplementedError + + def collater(self, samples): + raise NotImplementedError + + def __len__(self): + return len(self._sizes) + + def num_tokens(self, index): + return self.size(index) + + def size(self, index): + """Return an example's size as a float or tuple. This value is used when + filtering a dataset with ``--max-positions``.""" + return min(self._sizes[index], hparams['max_frames']) + + def ordered_indices(self): + """Return an ordered list of indices. 
Batches will be constructed based + on this order.""" + if self.shuffle: + indices = np.random.permutation(len(self)) + if self.sort_by_len: + indices = indices[np.argsort(np.array(self._sizes)[indices], kind='mergesort')] + else: + indices = np.arange(len(self)) + return indices + + @property + def num_workers(self): + return int(os.getenv('NUM_WORKERS', hparams['ds_workers'])) + + +class BaseConcatDataset(ConcatDataset): + def collater(self, samples): + return self.datasets[0].collater(samples) + + @property + def _sizes(self): + if not hasattr(self, 'sizes'): + self.sizes = list(chain.from_iterable([d._sizes for d in self.datasets])) + return self.sizes + + def size(self, index): + return min(self._sizes[index], hparams['max_frames']) + + def num_tokens(self, index): + return self.size(index) + + def ordered_indices(self): + """Return an ordered list of indices. Batches will be constructed based + on this order.""" + if self.datasets[0].shuffle: + indices = np.random.permutation(len(self)) + if self.datasets[0].sort_by_len: + indices = indices[np.argsort(np.array(self._sizes)[indices], kind='mergesort')] + else: + indices = np.arange(len(self)) + return indices + + @property + def num_workers(self): + return self.datasets[0].num_workers diff --git a/text_to_speech/utils/commons/ddp_utils.py b/text_to_speech/utils/commons/ddp_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..4b529198c13a1ffc622baea6e5178407b24aee8f --- /dev/null +++ b/text_to_speech/utils/commons/ddp_utils.py @@ -0,0 +1,137 @@ +from torch.nn.parallel import DistributedDataParallel +from torch.nn.parallel.distributed import _find_tensors +import torch.optim +import torch.utils.data +import torch +from packaging import version + +class DDP(DistributedDataParallel): + """ + Override the forward call in lightning so it goes to training and validation step respectively + """ + + def forward(self, *inputs, **kwargs): # pragma: no cover + if version.parse(torch.__version__[:6]) < version.parse("1.11"): + self._sync_params() + inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids) + assert len(self.device_ids) == 1 + if self.module.training: + output = self.module.training_step(*inputs[0], **kwargs[0]) + elif self.module.testing: + output = self.module.test_step(*inputs[0], **kwargs[0]) + else: + output = self.module.validation_step(*inputs[0], **kwargs[0]) + if torch.is_grad_enabled(): + # We'll return the output object verbatim since it is a freeform + # object. We need to find any tensors in this object, though, + # because we need to figure out which parameters were used during + # this forward pass, to ensure we short circuit reduction for any + # unused parameters. Only if `find_unused_parameters` is set. 
+ if self.find_unused_parameters: + self.reducer.prepare_for_backward(list(_find_tensors(output))) + else: + self.reducer.prepare_for_backward([]) + else: + from torch.nn.parallel.distributed import \ + logging, Join, _DDPSink, _tree_flatten_with_rref, _tree_unflatten_with_rref + with torch.autograd.profiler.record_function("DistributedDataParallel.forward"): + if torch.is_grad_enabled() and self.require_backward_grad_sync: + self.logger.set_runtime_stats_and_log() + self.num_iterations += 1 + self.reducer.prepare_for_forward() + + # Notify the join context that this process has not joined, if + # needed + work = Join.notify_join_context(self) + if work: + self.reducer._set_forward_pass_work_handle( + work, self._divide_by_initial_world_size + ) + + # Calling _rebuild_buckets before forward compuation, + # It may allocate new buckets before deallocating old buckets + # inside _rebuild_buckets. To save peak memory usage, + # call _rebuild_buckets before the peak memory usage increases + # during forward computation. + # This should be called only once during whole training period. + if torch.is_grad_enabled() and self.reducer._rebuild_buckets(): + logging.info("Reducer buckets have been rebuilt in this iteration.") + self._has_rebuilt_buckets = True + + # sync params according to location (before/after forward) user + # specified as part of hook, if hook was specified. + buffer_hook_registered = hasattr(self, 'buffer_hook') + if self._check_sync_bufs_pre_fwd(): + self._sync_buffers() + + if self._join_config.enable: + # Notify joined ranks whether they should sync in backwards pass or not. + self._check_global_requires_backward_grad_sync(is_joined_rank=False) + + inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids) + if self.module.training: + output = self.module.training_step(*inputs[0], **kwargs[0]) + elif self.module.testing: + output = self.module.test_step(*inputs[0], **kwargs[0]) + else: + output = self.module.validation_step(*inputs[0], **kwargs[0]) + + # sync params according to location (before/after forward) user + # specified as part of hook, if hook was specified. + if self._check_sync_bufs_post_fwd(): + self._sync_buffers() + + if torch.is_grad_enabled() and self.require_backward_grad_sync: + self.require_forward_param_sync = True + # We'll return the output object verbatim since it is a freeform + # object. We need to find any tensors in this object, though, + # because we need to figure out which parameters were used during + # this forward pass, to ensure we short circuit reduction for any + # unused parameters. Only if `find_unused_parameters` is set. + if self.find_unused_parameters and not self.static_graph: + # Do not need to populate this for static graph. + self.reducer.prepare_for_backward(list(_find_tensors(output))) + else: + self.reducer.prepare_for_backward([]) + else: + self.require_forward_param_sync = False + + # TODO: DDPSink is currently enabled for unused parameter detection and + # static graph training for first iteration. 
+ if (self.find_unused_parameters and not self.static_graph) or ( + self.static_graph and self.num_iterations == 1 + ): + state_dict = { + 'static_graph': self.static_graph, + 'num_iterations': self.num_iterations, + } + + output_tensor_list, treespec, output_is_rref = _tree_flatten_with_rref( + output + ) + output_placeholders = [None for _ in range(len(output_tensor_list))] + # Do not touch tensors that have no grad_fn, which can cause issues + # such as https://github.com/pytorch/pytorch/issues/60733 + for i, output in enumerate(output_tensor_list): + if torch.is_tensor(output) and output.grad_fn is None: + output_placeholders[i] = output + + # When find_unused_parameters=True, makes tensors which require grad + # run through the DDPSink backward pass. When not all outputs are + # used in loss, this makes those corresponding tensors receive + # undefined gradient which the reducer then handles to ensure + # param.grad field is not touched and we don't error out. + passthrough_tensor_list = _DDPSink.apply( + self.reducer, + state_dict, + *output_tensor_list, + ) + for i in range(len(output_placeholders)): + if output_placeholders[i] is None: + output_placeholders[i] = passthrough_tensor_list[i] + + # Reconstruct output data structure. + output = _tree_unflatten_with_rref( + output_placeholders, treespec, output_is_rref + ) + return output diff --git a/text_to_speech/utils/commons/hparams.py b/text_to_speech/utils/commons/hparams.py new file mode 100644 index 0000000000000000000000000000000000000000..70abf2513ad50352b29086a9d455f6fb1bec33fc --- /dev/null +++ b/text_to_speech/utils/commons/hparams.py @@ -0,0 +1,131 @@ +import argparse +import os +import yaml + +from text_to_speech.utils.os_utils import remove_file + +global_print_hparams = True +hparams = {} + + +class Args: + def __init__(self, **kwargs): + for k, v in kwargs.items(): + self.__setattr__(k, v) + + +def override_config(old_config: dict, new_config: dict): + for k, v in new_config.items(): + if isinstance(v, dict) and k in old_config: + override_config(old_config[k], new_config[k]) + else: + old_config[k] = v + + +def set_hparams(config='', exp_name='', hparams_str='', print_hparams=True, global_hparams=True): + if config == '' and exp_name == '': + parser = argparse.ArgumentParser(description='') + parser.add_argument('--config', type=str, default='', + help='location of the data corpus') + parser.add_argument('--exp_name', type=str, default='', help='exp_name') + parser.add_argument('-hp', '--hparams', type=str, default='', + help='location of the data corpus') + parser.add_argument('--infer', action='store_true', help='infer') + parser.add_argument('--validate', action='store_true', help='validate') + parser.add_argument('--reset', action='store_true', help='reset hparams') + parser.add_argument('--remove', action='store_true', help='remove old ckpt') + parser.add_argument('--debug', action='store_true', help='debug') + args, unknown = parser.parse_known_args() + print("| Unknow hparams: ", unknown) + else: + args = Args(config=config, exp_name=exp_name, hparams=hparams_str, + infer=False, validate=False, reset=False, debug=False, remove=False) + global hparams + assert args.config != '' or args.exp_name != '' + if args.config != '': + assert os.path.exists(args.config) + + config_chains = [] + loaded_config = set() + + def load_config(config_fn): + # deep first inheritance and avoid the second visit of one node + if not os.path.exists(config_fn): + return {} + with open(config_fn) as f: + hparams_ = yaml.safe_load(f) + 
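+        # record this config as visited so base_config chains that reference it again are not re-loaded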
loaded_config.add(config_fn) + if 'base_config' in hparams_: + ret_hparams = {} + if not isinstance(hparams_['base_config'], list): + hparams_['base_config'] = [hparams_['base_config']] + for c in hparams_['base_config']: + if c.startswith('.'): + c = f'{os.path.dirname(config_fn)}/{c}' + c = os.path.normpath(c) + if c not in loaded_config: + override_config(ret_hparams, load_config(c)) + override_config(ret_hparams, hparams_) + else: + ret_hparams = hparams_ + config_chains.append(config_fn) + return ret_hparams + + saved_hparams = {} + args_work_dir = '' + if args.exp_name != '': + args_work_dir = f'checkpoints/{args.exp_name}' + ckpt_config_path = f'{args_work_dir}/config.yaml' + if os.path.exists(ckpt_config_path): + with open(ckpt_config_path) as f: + saved_hparams_ = yaml.safe_load(f) + if saved_hparams_ is not None: + saved_hparams.update(saved_hparams_) + hparams_ = {} + if args.config != '': + hparams_.update(load_config(args.config)) + if not args.reset: + hparams_.update(saved_hparams) + hparams_['work_dir'] = args_work_dir + + # Support config overriding in command line. Support list type config overriding. + # Examples: --hparams="a=1,b.c=2,d=[1 1 1]" + if args.hparams != "": + for new_hparam in args.hparams.split(","): + k, v = new_hparam.split("=") + v = v.strip("\'\" ") + config_node = hparams_ + for k_ in k.split(".")[:-1]: + config_node = config_node[k_] + k = k.split(".")[-1] + if v in ['True', 'False'] or type(config_node[k]) in [bool, list, dict]: + if type(config_node[k]) == list: + v = v.replace(" ", ",") + config_node[k] = eval(v) + else: + config_node[k] = type(config_node[k])(v) + if args_work_dir != '' and args.remove: + answer = input("REMOVE old checkpoint? Y/N [Default: N]: ") + if answer.lower() == "y": + remove_file(args_work_dir) + if args_work_dir != '' and (not os.path.exists(ckpt_config_path) or args.reset) and not args.infer: + os.makedirs(hparams_['work_dir'], exist_ok=True) + with open(ckpt_config_path, 'w') as f: + yaml.safe_dump(hparams_, f) + + hparams_['infer'] = args.infer + hparams_['debug'] = args.debug + hparams_['validate'] = args.validate + hparams_['exp_name'] = args.exp_name + global global_print_hparams + if global_hparams: + hparams.clear() + hparams.update(hparams_) + if print_hparams and global_print_hparams and global_hparams: + print('| Hparams chains: ', config_chains) + print('| Hparams: ') + for i, (k, v) in enumerate(sorted(hparams_.items())): + print(f"\033[;33;m{k}\033[0m: {v}, ", end="\n" if i % 5 == 4 else "") + print("") + global_print_hparams = False + return hparams_ diff --git a/text_to_speech/utils/commons/indexed_datasets.py b/text_to_speech/utils/commons/indexed_datasets.py new file mode 100644 index 0000000000000000000000000000000000000000..13e3b42bde738c656654ebad803916fbb119f221 --- /dev/null +++ b/text_to_speech/utils/commons/indexed_datasets.py @@ -0,0 +1,77 @@ +import pickle +from copy import deepcopy + +import numpy as np + + +class IndexedDataset: + def __init__(self, path, num_cache=0): + super().__init__() + self.path = path + self.data_file = None + self.data_offsets = np.load(f"{path}.idx", allow_pickle=True).item()['offsets'] + self.data_file = open(f"{path}.data", 'rb', buffering=-1) + # self.cache = [] + self.cache = {} + self.num_cache = num_cache + + def check_index(self, i): + if i < 0 or i >= len(self.data_offsets) - 1: + raise IndexError('index out of range') + + def __del__(self): + if self.data_file: + self.data_file.close() + + def __getitem__(self, i): + self.check_index(i) + + if self.num_cache 
> 0: + if i in self.cache.keys(): + return self.cache[i] + # for c in self.cache: + # if c[0] == i: + # return c[1] + self.data_file.seek(self.data_offsets[i]) + b = self.data_file.read(self.data_offsets[i + 1] - self.data_offsets[i]) + item = pickle.loads(b) + if self.num_cache > 0 and len(self.cache) < self.num_cache: + if i not in self.cache.keys(): + self.cache[i] = deepcopy(item) + # self.cache = [(i, deepcopy(item))] + self.cache[:-1] + return item + + def __len__(self): + return len(self.data_offsets) - 1 + +class IndexedDatasetBuilder: + def __init__(self, path): + self.path = path + self.out_file = open(f"{path}.data", 'wb') + self.byte_offsets = [0] + + def add_item(self, item): + s = pickle.dumps(item) + bytes = self.out_file.write(s) + self.byte_offsets.append(self.byte_offsets[-1] + bytes) + + def finalize(self): + self.out_file.close() + np.save(open(f"{self.path}.idx", 'wb'), {'offsets': self.byte_offsets}) + + +if __name__ == "__main__": + import random + from tqdm import tqdm + ds_path = '/tmp/indexed_ds_example' + size = 100 + items = [{"a": np.random.normal(size=[10000, 10]), + "b": np.random.normal(size=[10000, 10])} for i in range(size)] + builder = IndexedDatasetBuilder(ds_path) + for i in tqdm(range(size)): + builder.add_item(items[i]) + builder.finalize() + ds = IndexedDataset(ds_path) + for i in tqdm(range(10000)): + idx = random.randint(0, size - 1) + assert (ds[idx]['a'] == items[idx]['a']).all() diff --git a/text_to_speech/utils/commons/meters.py b/text_to_speech/utils/commons/meters.py new file mode 100644 index 0000000000000000000000000000000000000000..1b5716547cefd33fb68ab99dc2a6a70f55336625 --- /dev/null +++ b/text_to_speech/utils/commons/meters.py @@ -0,0 +1,42 @@ +import time +import torch + + +class AvgrageMeter(object): + + def __init__(self): + self.reset() + + def reset(self): + self.avg = 0 + self.sum = 0 + self.cnt = 0 + + def update(self, val, n=1): + self.sum += val * n + self.cnt += n + self.avg = self.sum / self.cnt + + +class Timer: + timer_map = {} + + def __init__(self, name, enable=False): + if name not in Timer.timer_map: + Timer.timer_map[name] = 0 + self.name = name + self.enable = enable + + def __enter__(self): + if self.enable: + # if torch.cuda.is_available(): + # torch.cuda.synchronize() + self.t = time.time() + + def __exit__(self, exc_type, exc_val, exc_tb): + if self.enable: + # if torch.cuda.is_available(): + # torch.cuda.synchronize() + Timer.timer_map[self.name] += time.time() - self.t + if self.enable: + print(f'[Timer] {self.name}: {Timer.timer_map[self.name]}') diff --git a/text_to_speech/utils/commons/multiprocess_utils.py b/text_to_speech/utils/commons/multiprocess_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..e2773543c702d2819559dfde4c5febab03899790 --- /dev/null +++ b/text_to_speech/utils/commons/multiprocess_utils.py @@ -0,0 +1,130 @@ +import os +import traceback +from functools import partial +from tqdm import tqdm + + +def chunked_worker(worker_id, args_queue=None, results_queue=None, init_ctx_func=None): + ctx = init_ctx_func(worker_id) if init_ctx_func is not None else None + while True: + args = args_queue.get() + if args == '': + return + job_idx, map_func, arg = args + try: + map_func_ = partial(map_func, ctx=ctx) if ctx is not None else map_func + if isinstance(arg, dict): + res = map_func_(**arg) + elif isinstance(arg, (list, tuple)): + res = map_func_(*arg) + else: + res = map_func_(arg) + results_queue.put((job_idx, res)) + except: + traceback.print_exc() + 
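+            # report the failure too, so the consumer still receives exactly one result per job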
results_queue.put((job_idx, None)) + + +class MultiprocessManager: + def __init__(self, num_workers=None, init_ctx_func=None, multithread=False, queue_max=-1): + if multithread: + from multiprocessing.dummy import Queue, Process + else: + from multiprocessing import Queue, Process + if num_workers is None: + num_workers = int(os.getenv('N_PROC', os.cpu_count())) + self.num_workers = num_workers + self.results_queue = Queue(maxsize=-1) + self.jobs_pending = [] + self.args_queue = Queue(maxsize=queue_max) + self.workers = [] + self.total_jobs = 0 + self.multithread = multithread + for i in range(num_workers): + if multithread: + p = Process(target=chunked_worker, + args=(i, self.args_queue, self.results_queue, init_ctx_func)) + else: + p = Process(target=chunked_worker, + args=(i, self.args_queue, self.results_queue, init_ctx_func), + daemon=True) + self.workers.append(p) + p.start() + + def add_job(self, func, args): + if not self.args_queue.full(): + self.args_queue.put((self.total_jobs, func, args)) + else: + self.jobs_pending.append((self.total_jobs, func, args)) + self.total_jobs += 1 + + def get_results(self): + self.n_finished = 0 + while self.n_finished < self.total_jobs: + while len(self.jobs_pending) > 0 and not self.args_queue.full(): + self.args_queue.put(self.jobs_pending[0]) + self.jobs_pending = self.jobs_pending[1:] + job_id, res = self.results_queue.get() + yield job_id, res + self.n_finished += 1 + for w in range(self.num_workers): + self.args_queue.put("") + for w in self.workers: + w.join() + + def close(self): + if not self.multithread: + for w in self.workers: + w.terminate() + + def __len__(self): + return self.total_jobs + + +def multiprocess_run_tqdm(map_func, args, num_workers=None, ordered=True, init_ctx_func=None, + multithread=False, queue_max=-1, desc=None): + for i, res in tqdm( + multiprocess_run(map_func, args, num_workers, ordered, init_ctx_func, multithread, + queue_max=queue_max), + total=len(args), desc=desc): + yield i, res + + +def multiprocess_run(map_func, args, num_workers=None, ordered=True, init_ctx_func=None, multithread=False, + queue_max=-1): + """ + Multiprocessing running chunked jobs. 
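+    Jobs are dispatched to a pool of worker processes (threads when multithread=True)
+    and results are yielded back as (job_index, result) pairs.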
+ + Examples: + >>> for res in tqdm(multiprocess_run(job_func, args): + >>> print(res) + + :param map_func: + :param args: + :param num_workers: + :param ordered: + :param init_ctx_func: + :param q_max_size: + :param multithread: + :return: + """ + if num_workers is None: + num_workers = int(os.getenv('N_PROC', os.cpu_count())) + # num_workers = 1 + manager = MultiprocessManager(num_workers, init_ctx_func, multithread, queue_max=queue_max) + for arg in args: + manager.add_job(map_func, arg) + if ordered: + n_jobs = len(args) + results = ['' for _ in range(n_jobs)] + i_now = 0 + for job_i, res in manager.get_results(): + results[job_i] = res + while i_now < n_jobs and (not isinstance(results[i_now], str) or results[i_now] != ''): + yield i_now, results[i_now] + results[i_now] = None + i_now += 1 + else: + for job_i, res in manager.get_results(): + yield job_i, res + manager.close() diff --git a/text_to_speech/utils/commons/single_thread_env.py b/text_to_speech/utils/commons/single_thread_env.py new file mode 100644 index 0000000000000000000000000000000000000000..849219afd2cddec2ec6d489f12f60a34994bfb80 --- /dev/null +++ b/text_to_speech/utils/commons/single_thread_env.py @@ -0,0 +1,5 @@ +import os + +os.environ["OMP_NUM_THREADS"] = "1" +os.environ['TF_NUM_INTEROP_THREADS'] = '1' +os.environ['TF_NUM_INTRAOP_THREADS'] = '1' diff --git a/text_to_speech/utils/commons/tensor_utils.py b/text_to_speech/utils/commons/tensor_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..be4b69a4f135b95fcf18618668ed909314f24871 --- /dev/null +++ b/text_to_speech/utils/commons/tensor_utils.py @@ -0,0 +1,92 @@ +import torch +import torch.distributed as dist + + +def reduce_tensors(metrics): + new_metrics = {} + for k, v in metrics.items(): + if isinstance(v, torch.Tensor): + dist.all_reduce(v) + v = v / dist.get_world_size() + if type(v) is dict: + v = reduce_tensors(v) + new_metrics[k] = v + return new_metrics + + +def tensors_to_scalars(tensors): + if isinstance(tensors, torch.Tensor): + tensors = tensors.item() + return tensors + elif isinstance(tensors, dict): + new_tensors = {} + for k, v in tensors.items(): + v = tensors_to_scalars(v) + new_tensors[k] = v + return new_tensors + elif isinstance(tensors, list): + return [tensors_to_scalars(v) for v in tensors] + else: + return tensors + + +def tensors_to_np(tensors): + if isinstance(tensors, dict): + new_np = {} + for k, v in tensors.items(): + if isinstance(v, torch.Tensor): + v = v.cpu().numpy() + if type(v) is dict: + v = tensors_to_np(v) + new_np[k] = v + elif isinstance(tensors, list): + new_np = [] + for v in tensors: + if isinstance(v, torch.Tensor): + v = v.cpu().numpy() + if type(v) is dict: + v = tensors_to_np(v) + new_np.append(v) + elif isinstance(tensors, torch.Tensor): + v = tensors + if isinstance(v, torch.Tensor): + v = v.cpu().numpy() + if type(v) is dict: + v = tensors_to_np(v) + new_np = v + else: + raise Exception(f'tensors_to_np does not support type {type(tensors)}.') + return new_np + + +def move_to_cpu(tensors): + ret = {} + for k, v in tensors.items(): + if isinstance(v, torch.Tensor): + v = v.cpu() + if type(v) is dict: + v = move_to_cpu(v) + ret[k] = v + return ret + + +def move_to_cuda(batch, gpu_id=0): + # base case: object can be directly moved using `cuda` or `to` + if callable(getattr(batch, 'cuda', None)): + return batch.cuda(gpu_id, non_blocking=True) + elif callable(getattr(batch, 'to', None)): + return batch.to(torch.device('cuda', gpu_id), non_blocking=True) + elif isinstance(batch, list): + for 
i, x in enumerate(batch): + batch[i] = move_to_cuda(x, gpu_id) + return batch + elif isinstance(batch, tuple): + batch = list(batch) + for i, x in enumerate(batch): + batch[i] = move_to_cuda(x, gpu_id) + return tuple(batch) + elif isinstance(batch, dict): + for k, v in batch.items(): + batch[k] = move_to_cuda(v, gpu_id) + return batch + return batch diff --git a/text_to_speech/utils/commons/trainer.py b/text_to_speech/utils/commons/trainer.py new file mode 100644 index 0000000000000000000000000000000000000000..dbb190dd6cfb938071de77ffecda560c9ddecc85 --- /dev/null +++ b/text_to_speech/utils/commons/trainer.py @@ -0,0 +1,561 @@ +import random +import subprocess +import traceback +from datetime import datetime + +from torch.cuda.amp import GradScaler, autocast +import numpy as np +import torch.optim +import torch.utils.data +import copy +import logging +import os +import re +import sys +import torch +import torch.distributed as dist +import torch.multiprocessing as mp +import tqdm + +from text_to_speech.utils.commons.ckpt_utils import get_last_checkpoint, get_all_ckpts +from text_to_speech.utils.commons.ddp_utils import DDP +from text_to_speech.utils.commons.hparams import hparams +from text_to_speech.utils.commons.tensor_utils import move_to_cuda +from text_to_speech.utils.os_utils import remove_file + + +class Tee(object): + def __init__(self, name, mode): + self.file = open(name, mode) + self.stdout = sys.stdout + sys.stdout = self + + def __del__(self): + sys.stdout = self.stdout + self.file.close() + + def write(self, data): + self.file.write(data) + self.stdout.write(data) + + def flush(self): + self.file.flush() + + +class Trainer: + def __init__( + self, + work_dir, + default_save_path=None, + accumulate_grad_batches=1, + max_updates=160000, + print_nan_grads=False, + val_check_interval=2000, + num_sanity_val_steps=5, + amp=False, + # tb logger + log_save_interval=100, + tb_log_interval=10, + # checkpoint + monitor_key='val_loss', + monitor_mode='min', + num_ckpt_keep=5, + save_best=True, + resume_from_checkpoint=0, + seed=1234, + debug=False, + ): + os.makedirs(work_dir, exist_ok=True) + self.work_dir = work_dir + self.accumulate_grad_batches = accumulate_grad_batches + self.max_updates = max_updates + self.num_sanity_val_steps = num_sanity_val_steps + self.print_nan_grads = print_nan_grads + self.default_save_path = default_save_path + self.resume_from_checkpoint = resume_from_checkpoint if resume_from_checkpoint > 0 else None + self.seed = seed + self.debug = debug + # model and optm + self.task = None + self.optimizers = [] + + # trainer state + self.testing = False + self.global_step = 0 + self.current_epoch = 0 + self.total_batches = 0 + + # configure checkpoint + self.monitor_key = monitor_key + self.num_ckpt_keep = num_ckpt_keep + self.save_best = save_best + self.monitor_op = np.less if monitor_mode == 'min' else np.greater + self.best_val_results = np.Inf if monitor_mode == 'min' else -np.Inf + self.mode = 'min' + + # allow int, string and gpu list + self.all_gpu_ids = [ + int(x) for x in os.environ.get("CUDA_VISIBLE_DEVICES", "").split(",") if x != ''] + self.num_gpus = len(self.all_gpu_ids) + self.on_gpu = self.num_gpus > 0 + self.root_gpu = 0 + logging.info(f'GPU available: {torch.cuda.is_available()}, GPU used: {self.all_gpu_ids}') + self.use_ddp = self.num_gpus > 1 + self.proc_rank = 0 + # Tensorboard logging + self.log_save_interval = log_save_interval + self.val_check_interval = val_check_interval + self.tb_log_interval = tb_log_interval + self.amp = amp + 
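+        # gradient scaler for mixed-precision training; only used when amp=True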
self.amp_scalar = GradScaler() + + def test(self, task_cls): + self.testing = True + self.fit(task_cls) + + def fit(self, task_cls): + if len(self.all_gpu_ids) > 1: + mp.spawn(self.ddp_run, nprocs=self.num_gpus, args=(task_cls, copy.deepcopy(hparams))) + else: + self.task = task_cls() + self.task.trainer = self + self.run_single_process(self.task) + return 1 + + def ddp_run(self, gpu_idx, task_cls, hparams_): + hparams.update(hparams_) + self.proc_rank = gpu_idx + self.init_ddp_connection(self.proc_rank, self.num_gpus) + if dist.get_rank() != 0 and not self.debug: + sys.stdout = open(os.devnull, "w") + sys.stderr = open(os.devnull, "w") + task = task_cls() + task.trainer = self + torch.cuda.set_device(gpu_idx) + self.root_gpu = gpu_idx + self.task = task + self.run_single_process(task) + + def run_single_process(self, task): + """Sanity check a few things before starting actual training. + + :param task: + """ + # build model, optm and load checkpoint + if self.proc_rank == 0: + self.save_terminal_logs() + if not self.testing: + self.save_codes() + + model = task.build_model() + if model is not None: + task.model = model + checkpoint, _ = get_last_checkpoint(self.work_dir, self.resume_from_checkpoint) + if checkpoint is not None: + self.restore_weights(checkpoint) + elif self.on_gpu: + task.cuda(self.root_gpu) + if not self.testing: + self.optimizers = task.configure_optimizers() + self.fisrt_epoch = True + if checkpoint is not None: + self.restore_opt_state(checkpoint) + del checkpoint + # clear cache after restore + if self.on_gpu: + torch.cuda.empty_cache() + + if self.use_ddp: + self.task = self.configure_ddp(self.task) + dist.barrier() + + task_ref = self.get_task_ref() + task_ref.trainer = self + task_ref.testing = self.testing + # link up experiment object + if self.proc_rank == 0: + task_ref.build_tensorboard(save_dir=self.work_dir, name='tb_logs') + else: + os.makedirs('tmp', exist_ok=True) + task_ref.build_tensorboard(save_dir='tmp', name='tb_tmp') + self.logger = task_ref.logger + try: + if self.testing: + self.run_evaluation(test=True) + else: + self.train() + except KeyboardInterrupt as e: + traceback.print_exc() + task_ref.on_keyboard_interrupt() + + #################### + # valid and test + #################### + def run_evaluation(self, test=False): + eval_results = self.evaluate(self.task, test, tqdm_desc='Valid' if not test else 'test', + max_batches=hparams['eval_max_batches']) + if eval_results is not None and 'tb_log' in eval_results: + tb_log_output = eval_results['tb_log'] + self.log_metrics_to_tb(tb_log_output) + if self.proc_rank == 0 and not test: + self.save_checkpoint(epoch=self.current_epoch, logs=eval_results) + + def evaluate(self, task, test=False, tqdm_desc='Valid', max_batches=None): + if max_batches == -1: + max_batches = None + # enable eval mode + task.zero_grad() + task.eval() + torch.set_grad_enabled(False) + + task_ref = self.get_task_ref() + if test: + ret = task_ref.test_start() + if ret == 'EXIT': + return + else: + task_ref.validation_start() + outputs = [] + dataloader = task_ref.test_dataloader() if test else task_ref.val_dataloader() + pbar = tqdm.tqdm(dataloader, desc=tqdm_desc, total=max_batches, dynamic_ncols=True, unit='step', + disable=self.root_gpu > 0) + # give model a chance to do something with the outputs (and method defined) + for batch_idx, batch in enumerate(pbar): + if batch is None: # pragma: no cover + continue + # stop short when on fast_dev_run (sets max_batch=1) + if max_batches is not None and batch_idx >= max_batches: + 
break + + # make dataloader_idx arg in validation_step optional + if self.on_gpu: + batch = move_to_cuda(batch, self.root_gpu) + args = [batch, batch_idx] + if self.use_ddp: + output = task(*args) + else: + if test: + output = task_ref.test_step(*args) + else: + output = task_ref.validation_step(*args) + # track outputs for collation + outputs.append(output) + # give model a chance to do something with the outputs (and method defined) + if test: + eval_results = task_ref.test_end(outputs) + else: + eval_results = task_ref.validation_end(outputs) + # enable train mode again + task.train() + torch.set_grad_enabled(True) + return eval_results + + #################### + # train + #################### + def train(self): + task_ref = self.get_task_ref() + task_ref.on_train_start() + if self.num_sanity_val_steps > 0: + # run tiny validation (if validation defined) to make sure program won't crash during val + self.evaluate(self.task, False, 'Sanity Val', max_batches=self.num_sanity_val_steps) + # clear cache before training + if self.on_gpu: + torch.cuda.empty_cache() + dataloader = task_ref.train_dataloader() + epoch = self.current_epoch + # run all epochs + while True: + # set seed for distributed sampler (enables shuffling for each epoch) + if self.use_ddp and hasattr(dataloader.sampler, 'set_epoch'): + dataloader.sampler.set_epoch(epoch) + # update training progress in trainer and model + task_ref.current_epoch = epoch + self.current_epoch = epoch + # total batches includes multiple val checks + self.batch_loss_value = 0 # accumulated grads + # before epoch hook + task_ref.on_epoch_start() + + # run epoch + train_pbar = tqdm.tqdm(dataloader, initial=self.global_step, total=float('inf'), + dynamic_ncols=True, unit='step', disable=self.root_gpu > 0) + for batch_idx, batch in enumerate(train_pbar): + if self.global_step % self.val_check_interval == 0 and not self.fisrt_epoch: + self.run_evaluation() + pbar_metrics, tb_metrics = self.run_training_batch(batch_idx, batch) + train_pbar.set_postfix(**pbar_metrics) + self.fisrt_epoch = False + # when metrics should be logged + if (self.global_step + 1) % self.tb_log_interval == 0: + # logs user requested information to logger + self.log_metrics_to_tb(tb_metrics) + + self.global_step += 1 + task_ref.global_step = self.global_step + if self.global_step > self.max_updates: + print("| Training end..") + break + # epoch end hook + task_ref.on_epoch_end() + epoch += 1 + if self.global_step > self.max_updates: + break + task_ref.on_train_end() + + def run_training_batch(self, batch_idx, batch): + if batch is None: + return {} + all_progress_bar_metrics = [] + all_log_metrics = [] + task_ref = self.get_task_ref() + for opt_idx, optimizer in enumerate(self.optimizers): + if optimizer is None: + continue + # make sure only the gradients of the current optimizer's paramaters are calculated + # in the training step to prevent dangling gradients in multiple-optimizer setup. 
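+            # (freeze all parameters first, then re-enable requires_grad only for this optimizer's param groups)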
+ if len(self.optimizers) > 1: + for param in task_ref.parameters(): + param.requires_grad = False + for group in optimizer.param_groups: + for param in group['params']: + param.requires_grad = True + + # forward pass + with autocast(enabled=self.amp): + if self.on_gpu: + batch = move_to_cuda(copy.copy(batch), self.root_gpu) + args = [batch, batch_idx, opt_idx] + if self.use_ddp: + output = self.task(*args) + else: + output = task_ref.training_step(*args) + loss = output['loss'] + if loss is None: + continue + progress_bar_metrics = output['progress_bar'] + log_metrics = output['tb_log'] + # accumulate loss + loss = loss / self.accumulate_grad_batches + + # backward pass + if loss.requires_grad: + if self.amp: + self.amp_scalar.scale(loss).backward() + else: + loss.backward() + + # track progress bar metrics + all_log_metrics.append(log_metrics) + all_progress_bar_metrics.append(progress_bar_metrics) + + if loss is None: + continue + + # nan grads + if self.print_nan_grads: + has_nan_grad = False + for name, param in task_ref.named_parameters(): + if (param.grad is not None) and torch.isnan(param.grad.float()).any(): + print("| NaN params: ", name, param, param.grad) + has_nan_grad = True + if has_nan_grad: + exit(0) + + # gradient update with accumulated gradients + if (self.global_step + 1) % self.accumulate_grad_batches == 0: + grad_norm_dict = task_ref.on_before_optimization(opt_idx) + if grad_norm_dict is not None: + all_log_metrics[-1].update(grad_norm_dict) + if self.amp: + self.amp_scalar.step(optimizer) + self.amp_scalar.update() + else: + optimizer.step() + optimizer.zero_grad() + task_ref.on_after_optimization(self.current_epoch, batch_idx, optimizer, opt_idx) + + # collapse all metrics into one dict + all_progress_bar_metrics = {k: v for d in all_progress_bar_metrics for k, v in d.items()} + all_log_metrics = {k: v for d in all_log_metrics for k, v in d.items()} + return all_progress_bar_metrics, all_log_metrics + + #################### + # load and save checkpoint + #################### + def restore_weights(self, checkpoint): + # load model state + task_ref = self.get_task_ref() + + for k, v in checkpoint['state_dict'].items(): + getattr(task_ref, k).load_state_dict(v) + + if self.on_gpu: + task_ref.cuda(self.root_gpu) + # load training state (affects trainer only) + self.best_val_results = checkpoint['checkpoint_callback_best'] + self.global_step = checkpoint['global_step'] + self.current_epoch = checkpoint['epoch'] + task_ref.global_step = self.global_step + + # wait for all models to restore weights + if self.use_ddp: + # wait for all processes to catch up + dist.barrier() + + def restore_opt_state(self, checkpoint): + if self.testing: + return + # restore the optimizers + optimizer_states = checkpoint['optimizer_states'] + for optimizer, opt_state in zip(self.optimizers, optimizer_states): + if optimizer is None: + return + try: + optimizer.load_state_dict(opt_state) + # move optimizer to GPU 1 weight at a time + if self.on_gpu: + for state in optimizer.state.values(): + for k, v in state.items(): + if isinstance(v, torch.Tensor): + state[k] = v.cuda(self.root_gpu) + except ValueError: + print("| WARMING: optimizer parameters not match !!!") + try: + if dist.is_initialized() and dist.get_rank() > 0: + return + except Exception as e: + print(e) + return + did_restore = True + return did_restore + + def save_checkpoint(self, epoch, logs=None): + monitor_op = np.less + ckpt_path = f'{self.work_dir}/model_ckpt_steps_{self.global_step}.ckpt' + logging.info(f'Epoch 
{epoch:05d}@{self.global_step}: saving model to {ckpt_path}') + self._atomic_save(ckpt_path) + for old_ckpt in get_all_ckpts(self.work_dir)[self.num_ckpt_keep:]: + remove_file(old_ckpt) + logging.info(f'Delete ckpt: {os.path.basename(old_ckpt)}') + current = None + if logs is not None and self.monitor_key in logs: + current = logs[self.monitor_key] + if current is not None and self.save_best: + if monitor_op(current, self.best_val_results): + best_filepath = f'{self.work_dir}/model_ckpt_best.pt' + self.best_val_results = current + logging.info( + f'Epoch {epoch:05d}@{self.global_step}: {self.monitor_key} reached {current:0.5f}. ' + f'Saving model to {best_filepath}') + self._atomic_save(best_filepath) + + def _atomic_save(self, filepath): + checkpoint = self.dump_checkpoint() + tmp_path = str(filepath) + ".part" + torch.save(checkpoint, tmp_path, _use_new_zipfile_serialization=False) + os.replace(tmp_path, filepath) + + def dump_checkpoint(self): + checkpoint = {'epoch': self.current_epoch, 'global_step': self.global_step, + 'checkpoint_callback_best': self.best_val_results} + # save optimizers + optimizer_states = [] + for i, optimizer in enumerate(self.optimizers): + if optimizer is not None: + optimizer_states.append(optimizer.state_dict()) + + checkpoint['optimizer_states'] = optimizer_states + task_ref = self.get_task_ref() + checkpoint['state_dict'] = { + k: v.state_dict() for k, v in task_ref.named_children() if len(list(v.parameters())) > 0} + return checkpoint + + #################### + # DDP + #################### + def configure_ddp(self, task): + task = DDP(task, device_ids=[self.root_gpu], find_unused_parameters=True) + random.seed(self.seed) + np.random.seed(self.seed) + return task + + def init_ddp_connection(self, proc_rank, world_size): + root_node = '127.0.0.1' + root_node = self.resolve_root_node_address(root_node) + os.environ['MASTER_ADDR'] = root_node + dist.init_process_group('nccl', rank=proc_rank, world_size=world_size) + + def resolve_root_node_address(self, root_node): + if '[' in root_node: + name = root_node.split('[')[0] + number = root_node.split(',')[0] + if '-' in number: + number = number.split('-')[0] + number = re.sub('[^0-9]', '', number) + root_node = name + number + return root_node + + #################### + # utils + #################### + def get_task_ref(self): + from text_to_speech.utils.commons.base_task import BaseTask + task: BaseTask = self.task.module if isinstance(self.task, DDP) else self.task + return task + + def log_metrics_to_tb(self, metrics, step=None): + """Logs the metric dict passed in. 
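+        Tensors are converted to python scalars first, and the actual TensorBoard
+        write only happens on the rank-0 process.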
+ + :param metrics: + """ + # turn all tensors to scalars + scalar_metrics = self.metrics_to_scalars(metrics) + + step = step if step is not None else self.global_step + # log actual metrics + if self.proc_rank == 0: + self.log_metrics(self.logger, scalar_metrics, step=step) + + @staticmethod + def log_metrics(logger, metrics, step=None): + for k, v in metrics.items(): + if isinstance(v, torch.Tensor): + v = v.item() + logger.add_scalar(k, v, step) + + def metrics_to_scalars(self, metrics): + new_metrics = {} + for k, v in metrics.items(): + if isinstance(v, torch.Tensor): + v = v.item() + + if type(v) is dict: + v = self.metrics_to_scalars(v) + + new_metrics[k] = v + + return new_metrics + + def save_terminal_logs(self): + t = datetime.now().strftime('%Y%m%d%H%M%S') + os.makedirs(f'{self.work_dir}/terminal_logs', exist_ok=True) + Tee(f'{self.work_dir}/terminal_logs/log_{t}.txt', 'w') + + def save_codes(self): + if len(hparams['save_codes']) > 0: + t = datetime.now().strftime('%Y%m%d%H%M%S') + code_dir = f'{self.work_dir}/codes/{t}' + subprocess.check_call(f'mkdir -p "{code_dir}"', shell=True) + for c in hparams['save_codes']: + if os.path.exists(c): + subprocess.check_call( + f'rsync -aR ' + f'--include="*.py" ' + f'--include="*.yaml" ' + f'--exclude="__pycache__" ' + f'--include="*/" ' + f'--exclude="*" ' + f'"./{c}" "{code_dir}/"', + shell=True) + print(f"| Copied codes to {code_dir}.") diff --git a/text_to_speech/utils/metrics/diagonal_metrics.py b/text_to_speech/utils/metrics/diagonal_metrics.py new file mode 100644 index 0000000000000000000000000000000000000000..ba9807c1a594b38632c4731391e2d4fa3289037b --- /dev/null +++ b/text_to_speech/utils/metrics/diagonal_metrics.py @@ -0,0 +1,74 @@ +import torch + + +def get_focus_rate(attn, src_padding_mask=None, tgt_padding_mask=None): + ''' + attn: bs x L_t x L_s + ''' + if src_padding_mask is not None: + attn = attn * (1 - src_padding_mask.float())[:, None, :] + + if tgt_padding_mask is not None: + attn = attn * (1 - tgt_padding_mask.float())[:, :, None] + + focus_rate = attn.max(-1).values.sum(-1) + focus_rate = focus_rate / attn.sum(-1).sum(-1) + return focus_rate + + +def get_phone_coverage_rate(attn, src_padding_mask=None, src_seg_mask=None, tgt_padding_mask=None): + ''' + attn: bs x L_t x L_s + ''' + src_mask = attn.new(attn.size(0), attn.size(-1)).bool().fill_(False) + if src_padding_mask is not None: + src_mask |= src_padding_mask + if src_seg_mask is not None: + src_mask |= src_seg_mask + + attn = attn * (1 - src_mask.float())[:, None, :] + if tgt_padding_mask is not None: + attn = attn * (1 - tgt_padding_mask.float())[:, :, None] + + phone_coverage_rate = attn.max(1).values.sum(-1) + # phone_coverage_rate = phone_coverage_rate / attn.sum(-1).sum(-1) + phone_coverage_rate = phone_coverage_rate / (1 - src_mask.float()).sum(-1) + return phone_coverage_rate + + +def get_diagonal_focus_rate(attn, attn_ks, target_len, src_padding_mask=None, tgt_padding_mask=None, + band_mask_factor=5, band_width=50): + ''' + attn: bx x L_t x L_s + attn_ks: shape: tensor with shape [batch_size], input_lens/output_lens + + diagonal: y=k*x (k=attn_ks, x:output, y:input) + 1 0 0 + 0 1 0 + 0 0 1 + y>=k*(x-width) and y<=k*(x+width):1 + else:0 + ''' + # width = min(target_len/band_mask_factor, 50) + width1 = target_len / band_mask_factor + width2 = target_len.new(target_len.size()).fill_(band_width) + width = torch.where(width1 < width2, width1, width2).float() + base = torch.ones(attn.size()).to(attn.device) + zero = torch.zeros(attn.size()).to(attn.device) + x 
= torch.arange(0, attn.size(1)).to(attn.device)[None, :, None].float() * base + y = torch.arange(0, attn.size(2)).to(attn.device)[None, None, :].float() * base + cond = (y - attn_ks[:, None, None] * x) + cond1 = cond + attn_ks[:, None, None] * width[:, None, None] + cond2 = cond - attn_ks[:, None, None] * width[:, None, None] + mask1 = torch.where(cond1 < 0, zero, base) + mask2 = torch.where(cond2 > 0, zero, base) + mask = mask1 * mask2 + + if src_padding_mask is not None: + attn = attn * (1 - src_padding_mask.float())[:, None, :] + if tgt_padding_mask is not None: + attn = attn * (1 - tgt_padding_mask.float())[:, :, None] + + diagonal_attn = attn * mask + diagonal_focus_rate = diagonal_attn.sum(-1).sum(-1) / attn.sum(-1).sum(-1) + return diagonal_focus_rate, mask diff --git a/text_to_speech/utils/metrics/dtw.py b/text_to_speech/utils/metrics/dtw.py new file mode 100644 index 0000000000000000000000000000000000000000..464c4b747d792d23cf413675a47c9dddf67da134 --- /dev/null +++ b/text_to_speech/utils/metrics/dtw.py @@ -0,0 +1,162 @@ +from numpy import array, zeros, full, argmin, inf, ndim +from scipy.spatial.distance import cdist +from math import isinf + + +def dtw(x, y, dist, warp=1, w=inf, s=1.0): + """ + Computes Dynamic Time Warping (DTW) of two sequences. + + :param array x: N1*M array + :param array y: N2*M array + :param func dist: distance used as cost measure + :param int warp: how many shifts are computed. + :param int w: window size limiting the maximal distance between indices of matched entries |i,j|. + :param float s: weight applied on off-diagonal moves of the path. As s gets larger, the warping path is increasingly biased towards the diagonal + Returns the minimum distance, the cost matrix, the accumulated cost matrix, and the wrap path. + """ + assert len(x) + assert len(y) + assert isinf(w) or (w >= abs(len(x) - len(y))) + assert s > 0 + r, c = len(x), len(y) + if not isinf(w): + D0 = full((r + 1, c + 1), inf) + for i in range(1, r + 1): + D0[i, max(1, i - w):min(c + 1, i + w + 1)] = 0 + D0[0, 0] = 0 + else: + D0 = zeros((r + 1, c + 1)) + D0[0, 1:] = inf + D0[1:, 0] = inf + D1 = D0[1:, 1:] # view + for i in range(r): + for j in range(c): + if (isinf(w) or (max(0, i - w) <= j <= min(c, i + w))): + D1[i, j] = dist(x[i], y[j]) + C = D1.copy() + jrange = range(c) + for i in range(r): + if not isinf(w): + jrange = range(max(0, i - w), min(c, i + w + 1)) + for j in jrange: + min_list = [D0[i, j]] + for k in range(1, warp + 1): + i_k = min(i + k, r) + j_k = min(j + k, c) + min_list += [D0[i_k, j] * s, D0[i, j_k] * s] + D1[i, j] += min(min_list) + if len(x) == 1: + path = zeros(len(y)), range(len(y)) + elif len(y) == 1: + path = range(len(x)), zeros(len(x)) + else: + path = _traceback(D0) + return D1[-1, -1], C, D1, path + + +def accelerated_dtw(x, y, dist, warp=1): + """ + Computes Dynamic Time Warping (DTW) of two sequences in a faster way. + Instead of iterating through each element and calculating each distance, + this uses the cdist function from scipy (https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.cdist.html) + + :param array x: N1*M array + :param array y: N2*M array + :param string or func dist: distance parameter for cdist. When string is given, cdist uses optimized functions for the distance metrics. 
+ If a string is passed, the distance function can be 'braycurtis', 'canberra', 'chebyshev', 'cityblock', 'correlation', 'cosine', 'dice', 'euclidean', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'wminkowski', 'yule'. + :param int warp: how many shifts are computed. + Returns the minimum distance, the cost matrix, the accumulated cost matrix, and the wrap path. + """ + assert len(x) + assert len(y) + if ndim(x) == 1: + x = x.reshape(-1, 1) + if ndim(y) == 1: + y = y.reshape(-1, 1) + r, c = len(x), len(y) + D0 = zeros((r + 1, c + 1)) + D0[0, 1:] = inf + D0[1:, 0] = inf + D1 = D0[1:, 1:] + D0[1:, 1:] = cdist(x, y, dist) + C = D1.copy() + for i in range(r): + for j in range(c): + min_list = [D0[i, j]] + for k in range(1, warp + 1): + min_list += [D0[min(i + k, r), j], + D0[i, min(j + k, c)]] + D1[i, j] += min(min_list) + if len(x) == 1: + path = zeros(len(y)), range(len(y)) + elif len(y) == 1: + path = range(len(x)), zeros(len(x)) + else: + path = _traceback(D0) + return D1[-1, -1], C, D1, path + + +def _traceback(D): + i, j = array(D.shape) - 2 + p, q = [i], [j] + while (i > 0) or (j > 0): + tb = argmin((D[i, j], D[i, j + 1], D[i + 1, j])) + if tb == 0: + i -= 1 + j -= 1 + elif tb == 1: + i -= 1 + else: # (tb == 2): + j -= 1 + p.insert(0, i) + q.insert(0, j) + return array(p), array(q) + + +if __name__ == '__main__': + w = inf + s = 1.0 + if 1: # 1-D numeric + from sklearn.metrics.pairwise import manhattan_distances + import numpy as np + x = [0, 0, 1, 1, 2, 4, 2, 1, 2, 0] + x = np.array(x).reshape([-1,1,1]) + y = [1, 1, 1, 2, 2, 2, 2, 3, 2, 0] + y = np.array(y).reshape([-1,1,1]) + dist_fun = manhattan_distances + w = 1 + # s = 1.2 + elif 0: # 2-D numeric + from sklearn.metrics.pairwise import euclidean_distances + + x = [[0, 0], [0, 1], [1, 1], [1, 2], [2, 2], [4, 3], [2, 3], [1, 1], [2, 2], [0, 1]] + y = [[1, 0], [1, 1], [1, 1], [2, 1], [4, 3], [4, 3], [2, 3], [3, 1], [1, 2], [1, 0]] + dist_fun = euclidean_distances + else: # 1-D list of strings + from nltk.metrics.distance import edit_distance + + # x = ['we', 'shelled', 'clams', 'for', 'the', 'chowder'] + # y = ['class', 'too'] + x = ['i', 'soon', 'found', 'myself', 'muttering', 'to', 'the', 'walls'] + y = ['see', 'drown', 'himself'] + # x = 'we talked about the situation'.split() + # y = 'we talked about the situation'.split() + dist_fun = edit_distance + dist, cost, acc, path = dtw(x, y, dist_fun, w=w, s=s) + + # Vizualize + from matplotlib import pyplot as plt + + plt.imshow(cost.T, origin='lower', cmap=plt.cm.Reds, interpolation='nearest') + plt.plot(path[0], path[1], '-o') # relation + plt.xticks(range(len(x)), x) + plt.yticks(range(len(y)), y) + plt.xlabel('x') + plt.ylabel('y') + plt.axis('tight') + if isinf(w): + plt.title('Minimum distance: {}, slope weight: {}'.format(dist, s)) + else: + plt.title('Minimum distance: {}, window widht: {}, slope weight: {}'.format(dist, w, s)) + plt.show() diff --git a/text_to_speech/utils/metrics/laplace_var.py b/text_to_speech/utils/metrics/laplace_var.py new file mode 100644 index 0000000000000000000000000000000000000000..ec6f5f8d877195e7ee512d7e9f6f8a879d3ef32c --- /dev/null +++ b/text_to_speech/utils/metrics/laplace_var.py @@ -0,0 +1,4 @@ +import scipy.ndimage + +def laplace_var(x): + return scipy.ndimage.laplace(x).var() diff --git a/text_to_speech/utils/metrics/pitch_distance.py b/text_to_speech/utils/metrics/pitch_distance.py new file mode 100644 index 
0000000000000000000000000000000000000000..b8c8e06795758363639eb8cbda6bf1e05bf54ba6 --- /dev/null +++ b/text_to_speech/utils/metrics/pitch_distance.py @@ -0,0 +1,102 @@ +import numpy as np +import matplotlib.pyplot as plt +from numba import jit + +import torch + + +@jit +def time_warp(costs): + dtw = np.zeros_like(costs) + dtw[0, 1:] = np.inf + dtw[1:, 0] = np.inf + eps = 1e-4 + for i in range(1, costs.shape[0]): + for j in range(1, costs.shape[1]): + dtw[i, j] = costs[i, j] + min(dtw[i - 1, j], dtw[i, j - 1], dtw[i - 1, j - 1]) + return dtw + + +def align_from_distances(distance_matrix, debug=False, return_mindist=False): + # for each position in spectrum 1, returns best match position in spectrum2 + # using monotonic alignment + dtw = time_warp(distance_matrix) + + i = distance_matrix.shape[0] - 1 + j = distance_matrix.shape[1] - 1 + results = [0] * distance_matrix.shape[0] + while i > 0 and j > 0: + results[i] = j + i, j = min([(i - 1, j), (i, j - 1), (i - 1, j - 1)], key=lambda x: dtw[x[0], x[1]]) + + if debug: + visual = np.zeros_like(dtw) + visual[range(len(results)), results] = 1 + plt.matshow(visual) + plt.show() + if return_mindist: + return results, dtw[-1, -1] + return results + + +def get_local_context(input_f, max_window=32, scale_factor=1.): + # input_f: [S, 1], support numpy array or torch tensor + # return hist: [S, max_window * 2], list of list + T = input_f.shape[0] + # max_window = int(max_window * scale_factor) + derivative = [[0 for _ in range(max_window * 2)] for _ in range(T)] + + for t in range(T): # travel the time series + for feat_idx in range(-max_window, max_window): + if t + feat_idx < 0 or t + feat_idx >= T: + value = 0 + else: + value = input_f[t + feat_idx] + derivative[t][feat_idx + max_window] = value + return derivative + + +def cal_localnorm_dist(src, tgt, src_len, tgt_len): + local_src = torch.tensor(get_local_context(src)) + local_tgt = torch.tensor(get_local_context(tgt, scale_factor=tgt_len / src_len)) + + local_norm_src = (local_src - local_src.mean(-1).unsqueeze(-1)) # / local_src.std(-1).unsqueeze(-1) # [T1, 32] + local_norm_tgt = (local_tgt - local_tgt.mean(-1).unsqueeze(-1)) # / local_tgt.std(-1).unsqueeze(-1) # [T2, 32] + + dists = torch.cdist(local_norm_src[None, :, :], local_norm_tgt[None, :, :]) # [1, T1, T2] + return dists + + +## here is API for one sample +def LoNDTWDistance(src, tgt): + # src: [S] + # tgt: [T] + dists = cal_localnorm_dist(src, tgt, src.shape[0], tgt.shape[0]) # [1, S, T] + costs = dists.squeeze(0) # [S, T] + alignment, min_distance = align_from_distances(costs.T.cpu().detach().numpy(), return_mindist=True) # [T] + return alignment, min_distance + +# if __name__ == '__main__': +# # utils from ns +# from text_to_speech.utils.pitch_utils import denorm_f0 +# from tasks.singing.fsinging import FastSingingDataset +# from text_to_speech.utils.hparams import hparams, set_hparams +# +# set_hparams() +# +# train_ds = FastSingingDataset('test') +# +# # Test One sample case +# sample = train_ds[0] +# amateur_f0 = sample['f0'] +# prof_f0 = sample['prof_f0'] +# +# amateur_uv = sample['uv'] +# amateur_padding = sample['mel2ph'] == 0 +# prof_uv = sample['prof_uv'] +# prof_padding = sample['prof_mel2ph'] == 0 +# amateur_f0_denorm = denorm_f0(amateur_f0, amateur_uv, hparams, pitch_padding=amateur_padding) +# prof_f0_denorm = denorm_f0(prof_f0, prof_uv, hparams, pitch_padding=prof_padding) +# alignment, min_distance = LoNDTWDistance(amateur_f0_denorm, prof_f0_denorm) +# print(min_distance) +# python utils/pitch_distance.py --config 
egs/datasets/audio/molar/svc_ppg.yaml diff --git a/text_to_speech/utils/metrics/ssim.py b/text_to_speech/utils/metrics/ssim.py new file mode 100644 index 0000000000000000000000000000000000000000..cb8c6a47b14fbd450a6717a21236906d6de9679f --- /dev/null +++ b/text_to_speech/utils/metrics/ssim.py @@ -0,0 +1,84 @@ +""" +Adapted from https://github.com/Po-Hsun-Su/pytorch-ssim +""" + +import torch +import torch.nn.functional as F +from torch.autograd import Variable +import numpy as np +from math import exp + + +def gaussian(window_size, sigma): + gauss = torch.Tensor([exp(-(x - window_size // 2) ** 2 / float(2 * sigma ** 2)) for x in range(window_size)]) + return gauss / gauss.sum() + + +def create_window(window_size, channel): + _1D_window = gaussian(window_size, 1.5).unsqueeze(1) + _2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0) + window = Variable(_2D_window.expand(channel, 1, window_size, window_size).contiguous()) + return window + + +def _ssim(img1, img2, window, window_size, channel, size_average=True): + mu1 = F.conv2d(img1, window, padding=window_size // 2, groups=channel) + mu2 = F.conv2d(img2, window, padding=window_size // 2, groups=channel) + + mu1_sq = mu1.pow(2) + mu2_sq = mu2.pow(2) + mu1_mu2 = mu1 * mu2 + + sigma1_sq = F.conv2d(img1 * img1, window, padding=window_size // 2, groups=channel) - mu1_sq + sigma2_sq = F.conv2d(img2 * img2, window, padding=window_size // 2, groups=channel) - mu2_sq + sigma12 = F.conv2d(img1 * img2, window, padding=window_size // 2, groups=channel) - mu1_mu2 + + C1 = 0.01 ** 2 + C2 = 0.03 ** 2 + + ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2)) + + if size_average: + return ssim_map.mean() + else: + return ssim_map.mean(1) + + +class SSIM(torch.nn.Module): + def __init__(self, window_size=11, size_average=True): + super(SSIM, self).__init__() + self.window_size = window_size + self.size_average = size_average + self.channel = 1 + self.window = create_window(window_size, self.channel) + + def forward(self, img1, img2): + (_, channel, _, _) = img1.size() + + if channel == self.channel and self.window.data.type() == img1.data.type(): + window = self.window + else: + window = create_window(self.window_size, channel) + + if img1.is_cuda: + window = window.cuda(img1.get_device()) + window = window.type_as(img1) + + self.window = window + self.channel = channel + + return _ssim(img1, img2, window, self.window_size, channel, self.size_average) + + +window = None + + +def ssim(img1, img2, window_size=11, size_average=True): + (_, channel, _, _) = img1.size() + global window + if window is None: + window = create_window(window_size, channel) + if img1.is_cuda: + window = window.cuda(img1.get_device()) + window = window.type_as(img1) + return _ssim(img1, img2, window, window_size, channel, size_average) diff --git a/text_to_speech/utils/nn/__pycache__/seq_utils.cpython-38.pyc b/text_to_speech/utils/nn/__pycache__/seq_utils.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a946f7a7d1b4f2cf9d579bf8afcebe990a3ff05c Binary files /dev/null and b/text_to_speech/utils/nn/__pycache__/seq_utils.cpython-38.pyc differ diff --git a/text_to_speech/utils/nn/model_utils.py b/text_to_speech/utils/nn/model_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..d5d7adc5ccaa5d2979dc2e729b6fc01fecbb3947 --- /dev/null +++ b/text_to_speech/utils/nn/model_utils.py @@ -0,0 +1,49 @@ +import numpy as np +import torch + +def print_arch(model, 
model_name='model'):
+    print(f"| {model_name} Arch: ", model)
+    num_params(model, model_name=model_name)
+
+
+def num_params(model, print_out=True, model_name="model"):
+    parameters = filter(lambda p: p.requires_grad, model.parameters())
+    parameters = sum([np.prod(p.size()) for p in parameters]) / 1_000_000
+    if print_out:
+        print(f'| {model_name} Trainable Parameters: {parameters:.3f}M')
+    return parameters
+
+def requires_grad(model):
+    if isinstance(model, torch.nn.Module):
+        for p in model.parameters():
+            p.requires_grad = True
+    else:
+        model.requires_grad = True
+
+def not_requires_grad(model):
+    if isinstance(model, torch.nn.Module):
+        for p in model.parameters():
+            p.requires_grad = False
+    else:
+        model.requires_grad = False
+
+def get_grad_norm(model, l=2):
+    num_para = 0
+    accu_grad = 0
+    if isinstance(model, torch.nn.Module):
+        params = model.parameters()
+    else:
+        params = model
+    for p in params:
+        if p.grad is None:
+            continue
+        num_para += p.numel()
+        if l == 1:
+            accu_grad += p.grad.abs().sum()
+        elif l == 2:
+            accu_grad += p.grad.pow(2).sum()
+        else:
+            raise ValueError("Only l1 and l2 norms are implemented!")
+    if l == 2:
+        accu_grad = accu_grad ** 0.5
+    return accu_grad
\ No newline at end of file
diff --git a/text_to_speech/utils/nn/schedulers.py b/text_to_speech/utils/nn/schedulers.py new file mode 100644 index 0000000000000000000000000000000000000000..ce7494b136a77d7724ab97ae2cdc56e6c9694214 --- /dev/null +++ b/text_to_speech/utils/nn/schedulers.py @@ -0,0 +1,105 @@
+import numpy as np
+
+
+class NoneSchedule(object):
+    def __init__(self, optimizer, lr):
+        self.optimizer = optimizer
+        self.constant_lr = lr
+        self.step(0)
+
+    def step(self, num_updates):
+        self.lr = self.constant_lr
+        for param_group in self.optimizer.param_groups:
+            param_group['lr'] = self.lr
+        return self.lr
+
+    def get_lr(self):
+        return self.optimizer.param_groups[0]['lr']
+
+    def get_last_lr(self):
+        return self.get_lr()
+
+
+class RSQRTSchedule(NoneSchedule):
+    def __init__(self, optimizer, lr, warmup_updates, hidden_size):
+        self.optimizer = optimizer
+        self.constant_lr = lr
+        self.warmup_updates = warmup_updates
+        self.hidden_size = hidden_size
+        self.lr = lr
+        for param_group in optimizer.param_groups:
+            param_group['lr'] = self.lr
+        self.step(0)
+
+    def step(self, num_updates):
+        constant_lr = self.constant_lr
+        warmup = min(num_updates / self.warmup_updates, 1.0)
+        rsqrt_decay = max(self.warmup_updates, num_updates) ** -0.5
+        rsqrt_hidden = self.hidden_size ** -0.5
+        self.lr = max(constant_lr * warmup * rsqrt_decay * rsqrt_hidden, 1e-6)
+        for param_group in self.optimizer.param_groups:
+            param_group['lr'] = self.lr
+        return self.lr
+
+
+class WarmupSchedule(NoneSchedule):
+    def __init__(self, optimizer, lr, warmup_updates):
+        self.optimizer = optimizer
+        self.constant_lr = self.lr = lr
+        self.warmup_updates = warmup_updates
+        for param_group in optimizer.param_groups:
+            param_group['lr'] = self.lr
+        self.step(0)
+
+    def step(self, num_updates):
+        constant_lr = self.constant_lr
+        warmup = min(num_updates / self.warmup_updates, 1.0)
+        self.lr = max(constant_lr * warmup, 1e-7)
+        for param_group in self.optimizer.param_groups:
+            param_group['lr'] = self.lr
+        return self.lr
+
+
+class CosineSchedule(NoneSchedule):
+    def __init__(self, optimizer, lr, warmup_updates, total_updates):
+        self.optimizer = optimizer
+        self.constant_lr = lr
+        self.warmup_updates = warmup_updates
+        self.total_updates = total_updates
+        self.lr = lr
+        self.assign_learning_rate(self.optimizer, self.lr)
+        self.step(0)
+
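+    # CosineSchedule: linear warmup from ~0 to `lr` over `warmup_updates` steps,
+    # then cosine decay towards 0 at `total_updates`:
+    #   lr(t) = lr * (t + 1) / warmup_updates                                            for t < warmup_updates
+    #   lr(t) = 0.5 * (1 + cos(pi * (t - warmup_updates) / (total_updates - warmup_updates))) * lr   otherwise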
def assign_learning_rate(self, optimizer, new_lr): + for param_group in optimizer.param_groups: + param_group["lr"] = new_lr + + def _warmup_lr(self, base_lr, warmup_length, step): + return base_lr * (step + 1) / warmup_length + + def step(self, num_updates): + if num_updates < self.warmup_updates: + lr = self._warmup_lr(self.lr, self.warmup_updates, num_updates) + else: + e = num_updates - self.warmup_updates + es = self.total_updates - self.warmup_updates + lr = 0.5 * (1 + np.cos(np.pi * e / es)) * self.lr + self.assign_learning_rate(self.optimizer, lr) + return lr + + +if __name__ == '__main__': + import numpy as np + import matplotlib.pyplot as plt + import torch + + def plot_scheduler(scheduler, label=None): + y = np.array([scheduler.step(x) for x in range(0,160000, 10)]) + x = np.arange(0,160000, 10) + plt.plot(x, y, label=label) + + dummy_model = torch.nn.Linear(10,10) + dummy_optimizer = torch.optim.Adam(dummy_model.parameters()) + rsqrt = CosineSchedule(dummy_optimizer, lr=0.0005, warmup_updates=10000, total_updates=160000) + plot_scheduler(rsqrt, "8000") + plt.savefig("0.png") diff --git a/text_to_speech/utils/nn/seq_utils.py b/text_to_speech/utils/nn/seq_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..695a478212bc0384e59ce0e08d0f06be01eca370 --- /dev/null +++ b/text_to_speech/utils/nn/seq_utils.py @@ -0,0 +1,311 @@ +from collections import defaultdict +import torch +import torch.nn.functional as F + + +def make_positions(tensor, padding_idx): + """Replace non-padding symbols with their position numbers. + + Position numbers begin at padding_idx+1. Padding symbols are ignored. + """ + # The series of casts and type-conversions here are carefully + # balanced to both work with ONNX export and XLA. In particular XLA + # prefers ints, cumsum defaults to output longs, and ONNX doesn't know + # how to handle the dtype kwarg in cumsum. + mask = tensor.ne(padding_idx).int() + return ( + torch.cumsum(mask, dim=1).type_as(mask) * mask + ).long() + padding_idx + + +def softmax(x, dim): + return F.softmax(x, dim=dim, dtype=torch.float32) + + +def sequence_mask(lengths, maxlen, dtype=torch.bool): + if maxlen is None: + maxlen = lengths.max() + mask = ~(torch.ones((len(lengths), maxlen)).to(lengths.device).cumsum(dim=1).t() > lengths).t() + mask.type(dtype) + return mask + + +def weights_nonzero_speech(target): + # target : B x T x mel + # Assign weight 1.0 to all labels except for padding (id=0). 
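+    # A frame counts as non-padding if any mel bin is non-zero; returns a float mask of shape [B, T, mel].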
+ dim = target.size(-1) + return target.abs().sum(-1, keepdim=True).ne(0).float().repeat(1, 1, dim) + + +INCREMENTAL_STATE_INSTANCE_ID = defaultdict(lambda: 0) + + +def _get_full_incremental_state_key(module_instance, key): + module_name = module_instance.__class__.__name__ + + # assign a unique ID to each module instance, so that incremental state is + # not shared across module instances + if not hasattr(module_instance, '_instance_id'): + INCREMENTAL_STATE_INSTANCE_ID[module_name] += 1 + module_instance._instance_id = INCREMENTAL_STATE_INSTANCE_ID[module_name] + + return '{}.{}.{}'.format(module_name, module_instance._instance_id, key) + + +def get_incremental_state(module, incremental_state, key): + """Helper for getting incremental state for an nn.Module.""" + full_key = _get_full_incremental_state_key(module, key) + if incremental_state is None or full_key not in incremental_state: + return None + return incremental_state[full_key] + + +def set_incremental_state(module, incremental_state, key, value): + """Helper for setting incremental state for an nn.Module.""" + if incremental_state is not None: + full_key = _get_full_incremental_state_key(module, key) + incremental_state[full_key] = value + + +def fill_with_neg_inf(t): + """FP16-compatible function that fills a tensor with -inf.""" + return t.float().fill_(float('-inf')).type_as(t) + + +def fill_with_neg_inf2(t): + """FP16-compatible function that fills a tensor with -inf.""" + return t.float().fill_(-1e8).type_as(t) + + +def select_attn(attn_logits, type='best'): + """ + + :param attn_logits: [n_layers, B, n_head, T_sp, T_txt] + :return: + """ + encdec_attn = torch.stack(attn_logits, 0).transpose(1, 2) + # [n_layers * n_head, B, T_sp, T_txt] + encdec_attn = (encdec_attn.reshape([-1, *encdec_attn.shape[2:]])).softmax(-1) + if type == 'best': + indices = encdec_attn.max(-1).values.sum(-1).argmax(0) + encdec_attn = encdec_attn.gather( + 0, indices[None, :, None, None].repeat(1, 1, encdec_attn.size(-2), encdec_attn.size(-1)))[0] + return encdec_attn + elif type == 'mean': + return encdec_attn.mean(0) + + +def make_pad_mask(lengths, xs=None, length_dim=-1): + """Make mask tensor containing indices of padded part. + Args: + lengths (LongTensor or List): Batch of lengths (B,). + xs (Tensor, optional): The reference tensor. + If set, masks will be the same shape as this tensor. + length_dim (int, optional): Dimension indicator of the above tensor. + See the example. + Returns: + Tensor: Mask tensor containing indices of padded part. + dtype=torch.uint8 in PyTorch 1.2- + dtype=torch.bool in PyTorch 1.2+ (including 1.2) + Examples: + With only lengths. + >>> lengths = [5, 3, 2] + >>> make_non_pad_mask(lengths) + masks = [[0, 0, 0, 0 ,0], + [0, 0, 0, 1, 1], + [0, 0, 1, 1, 1]] + With the reference tensor. + >>> xs = torch.zeros((3, 2, 4)) + >>> make_pad_mask(lengths, xs) + tensor([[[0, 0, 0, 0], + [0, 0, 0, 0]], + [[0, 0, 0, 1], + [0, 0, 0, 1]], + [[0, 0, 1, 1], + [0, 0, 1, 1]]], dtype=torch.uint8) + >>> xs = torch.zeros((3, 2, 6)) + >>> make_pad_mask(lengths, xs) + tensor([[[0, 0, 0, 0, 0, 1], + [0, 0, 0, 0, 0, 1]], + [[0, 0, 0, 1, 1, 1], + [0, 0, 0, 1, 1, 1]], + [[0, 0, 1, 1, 1, 1], + [0, 0, 1, 1, 1, 1]]], dtype=torch.uint8) + With the reference tensor and dimension indicator. 
+ >>> xs = torch.zeros((3, 6, 6)) + >>> make_pad_mask(lengths, xs, 1) + tensor([[[0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [1, 1, 1, 1, 1, 1]], + [[0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1]], + [[0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1]]], dtype=torch.uint8) + >>> make_pad_mask(lengths, xs, 2) + tensor([[[0, 0, 0, 0, 0, 1], + [0, 0, 0, 0, 0, 1], + [0, 0, 0, 0, 0, 1], + [0, 0, 0, 0, 0, 1], + [0, 0, 0, 0, 0, 1], + [0, 0, 0, 0, 0, 1]], + [[0, 0, 0, 1, 1, 1], + [0, 0, 0, 1, 1, 1], + [0, 0, 0, 1, 1, 1], + [0, 0, 0, 1, 1, 1], + [0, 0, 0, 1, 1, 1], + [0, 0, 0, 1, 1, 1]], + [[0, 0, 1, 1, 1, 1], + [0, 0, 1, 1, 1, 1], + [0, 0, 1, 1, 1, 1], + [0, 0, 1, 1, 1, 1], + [0, 0, 1, 1, 1, 1], + [0, 0, 1, 1, 1, 1]]], dtype=torch.uint8) + """ + if length_dim == 0: + raise ValueError("length_dim cannot be 0: {}".format(length_dim)) + + if not isinstance(lengths, list): + lengths = lengths.tolist() + bs = int(len(lengths)) + if xs is None: + maxlen = int(max(lengths)) + else: + maxlen = xs.size(length_dim) + + seq_range = torch.arange(0, maxlen, dtype=torch.int64) + seq_range_expand = seq_range.unsqueeze(0).expand(bs, maxlen) + seq_length_expand = seq_range_expand.new(lengths).unsqueeze(-1) + mask = seq_range_expand >= seq_length_expand + + if xs is not None: + assert xs.size(0) == bs, (xs.size(0), bs) + + if length_dim < 0: + length_dim = xs.dim() + length_dim + # ind = (:, None, ..., None, :, , None, ..., None) + ind = tuple( + slice(None) if i in (0, length_dim) else None for i in range(xs.dim()) + ) + mask = mask[ind].expand_as(xs).to(xs.device) + return mask + + +def make_non_pad_mask(lengths, xs=None, length_dim=-1): + """Make mask tensor containing indices of non-padded part. + Args: + lengths (LongTensor or List): Batch of lengths (B,). + xs (Tensor, optional): The reference tensor. + If set, masks will be the same shape as this tensor. + length_dim (int, optional): Dimension indicator of the above tensor. + See the example. + Returns: + ByteTensor: mask tensor containing indices of padded part. + dtype=torch.uint8 in PyTorch 1.2- + dtype=torch.bool in PyTorch 1.2+ (including 1.2) + Examples: + With only lengths. + >>> lengths = [5, 3, 2] + >>> make_non_pad_mask(lengths) + masks = [[1, 1, 1, 1 ,1], + [1, 1, 1, 0, 0], + [1, 1, 0, 0, 0]] + With the reference tensor. + >>> xs = torch.zeros((3, 2, 4)) + >>> make_non_pad_mask(lengths, xs) + tensor([[[1, 1, 1, 1], + [1, 1, 1, 1]], + [[1, 1, 1, 0], + [1, 1, 1, 0]], + [[1, 1, 0, 0], + [1, 1, 0, 0]]], dtype=torch.uint8) + >>> xs = torch.zeros((3, 2, 6)) + >>> make_non_pad_mask(lengths, xs) + tensor([[[1, 1, 1, 1, 1, 0], + [1, 1, 1, 1, 1, 0]], + [[1, 1, 1, 0, 0, 0], + [1, 1, 1, 0, 0, 0]], + [[1, 1, 0, 0, 0, 0], + [1, 1, 0, 0, 0, 0]]], dtype=torch.uint8) + With the reference tensor and dimension indicator. 
+ >>> xs = torch.zeros((3, 6, 6)) + >>> make_non_pad_mask(lengths, xs, 1) + tensor([[[1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1], + [0, 0, 0, 0, 0, 0]], + [[1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0]], + [[1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0]]], dtype=torch.uint8) + >>> make_non_pad_mask(lengths, xs, 2) + tensor([[[1, 1, 1, 1, 1, 0], + [1, 1, 1, 1, 1, 0], + [1, 1, 1, 1, 1, 0], + [1, 1, 1, 1, 1, 0], + [1, 1, 1, 1, 1, 0], + [1, 1, 1, 1, 1, 0]], + [[1, 1, 1, 0, 0, 0], + [1, 1, 1, 0, 0, 0], + [1, 1, 1, 0, 0, 0], + [1, 1, 1, 0, 0, 0], + [1, 1, 1, 0, 0, 0], + [1, 1, 1, 0, 0, 0]], + [[1, 1, 0, 0, 0, 0], + [1, 1, 0, 0, 0, 0], + [1, 1, 0, 0, 0, 0], + [1, 1, 0, 0, 0, 0], + [1, 1, 0, 0, 0, 0], + [1, 1, 0, 0, 0, 0]]], dtype=torch.uint8) + """ + return ~make_pad_mask(lengths, xs, length_dim) + + +def get_mask_from_lengths(lengths): + max_len = torch.max(lengths).item() + ids = torch.arange(0, max_len).to(lengths.device) + mask = (ids < lengths.unsqueeze(1)).bool() + return mask + + +def group_hidden_by_segs(h, seg_ids, max_len): + """ + + :param h: [B, T, H] + :param seg_ids: [B, T] + :return: h_ph: [B, T_ph, H] + """ + B, T, H = h.shape + h_gby_segs = h.new_zeros([B, max_len + 1, H]).scatter_add_(1, seg_ids[:, :, None].repeat([1, 1, H]), h) + all_ones = h.new_ones(h.shape[:2]) + cnt_gby_segs = h.new_zeros([B, max_len + 1]).scatter_add_(1, seg_ids, all_ones).contiguous() + h_gby_segs = h_gby_segs[:, 1:] + cnt_gby_segs = cnt_gby_segs[:, 1:] + h_gby_segs = h_gby_segs / torch.clamp(cnt_gby_segs[:, :, None], min=1) + return h_gby_segs, cnt_gby_segs + +def expand_word2ph(word_encoding, ph2word): + word_encoding = F.pad(word_encoding,[0,0,1,0]) + ph2word_ = ph2word[:, :, None].repeat([1, 1, word_encoding.shape[-1]]) + out = torch.gather(word_encoding, 1, ph2word_) # [B, T, H] + return out diff --git a/text_to_speech/utils/os_utils.py b/text_to_speech/utils/os_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..4567d17c398c535884600cdd86a36a823acb886f --- /dev/null +++ b/text_to_speech/utils/os_utils.py @@ -0,0 +1,20 @@ +import os +import subprocess + + +def link_file(from_file, to_file): + subprocess.check_call( + f'ln -s "`realpath --relative-to="{os.path.dirname(to_file)}" "{from_file}"`" "{to_file}"', shell=True) + + +def move_file(from_file, to_file): + subprocess.check_call(f'mv "{from_file}" "{to_file}"', shell=True) + + +def copy_file(from_file, to_file): + subprocess.check_call(f'cp -r "{from_file}" "{to_file}"', shell=True) + + +def remove_file(*fns): + for f in fns: + subprocess.check_call(f'rm -rf "{f}"', shell=True) diff --git a/text_to_speech/utils/plot/plot.py b/text_to_speech/utils/plot/plot.py new file mode 100644 index 0000000000000000000000000000000000000000..9d7fc02cef69fa5517228437156e687ca054efc8 --- /dev/null +++ b/text_to_speech/utils/plot/plot.py @@ -0,0 +1,51 @@ +import matplotlib + +matplotlib.use('Agg') +import matplotlib.pyplot as plt +import numpy as np +import torch + +LINE_COLORS = ['w', 'r', 'orange', 'k', 'cyan', 'm', 'b', 'lime', 'g', 'brown', 'navy'] + + +def spec_to_figure(spec, vmin=None, vmax=None, title='', f0s=None, dur_info=None): + if isinstance(spec, torch.Tensor): + spec = spec.cpu().numpy() + H = spec.shape[1] // 2 + fig = plt.figure(figsize=(12, 6)) + plt.title(title) + plt.pcolor(spec.T, vmin=vmin, 
vmax=vmax) + if dur_info is not None: + assert isinstance(dur_info, dict) + txt = dur_info['txt'] + dur_gt = dur_info['dur_gt'] + if isinstance(dur_gt, torch.Tensor): + dur_gt = dur_gt.cpu().numpy() + dur_gt = np.cumsum(dur_gt).astype(int) + for i in range(len(dur_gt)): + shift = (i % 8) + 1 + plt.text(dur_gt[i], shift * 4, txt[i]) + plt.vlines(dur_gt[i], 0, H // 2, colors='b') # blue is gt + plt.xlim(0, dur_gt[-1]) + if 'dur_pred' in dur_info: + dur_pred = dur_info['dur_pred'] + if isinstance(dur_pred, torch.Tensor): + dur_pred = dur_pred.cpu().numpy() + dur_pred = np.cumsum(dur_pred).astype(int) + for i in range(len(dur_pred)): + shift = (i % 8) + 1 + plt.text(dur_pred[i], H + shift * 4, txt[i]) + plt.vlines(dur_pred[i], H, H * 1.5, colors='r') # red is pred + plt.xlim(0, max(dur_gt[-1], dur_pred[-1])) + if f0s is not None: + ax = plt.gca() + ax2 = ax.twinx() + if not isinstance(f0s, dict): + f0s = {'f0': f0s} + for i, (k, f0) in enumerate(f0s.items()): + if isinstance(f0, torch.Tensor): + f0 = f0.cpu().numpy() + ax2.plot(f0, label=k, c=LINE_COLORS[i], linewidth=1, alpha=0.5) + ax2.set_ylim(0, 1000) + ax2.legend() + return fig diff --git a/text_to_speech/utils/text/__pycache__/text_encoder.cpython-38.pyc b/text_to_speech/utils/text/__pycache__/text_encoder.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fb9dbf0659fe0285a375cfee4e05314e4fa3d61f Binary files /dev/null and b/text_to_speech/utils/text/__pycache__/text_encoder.cpython-38.pyc differ diff --git a/text_to_speech/utils/text/__pycache__/text_norm.cpython-38.pyc b/text_to_speech/utils/text/__pycache__/text_norm.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7d642e44c2c36abebad62824e0afe39caf1fe4c1 Binary files /dev/null and b/text_to_speech/utils/text/__pycache__/text_norm.cpython-38.pyc differ diff --git a/text_to_speech/utils/text/encoding.py b/text_to_speech/utils/text/encoding.py new file mode 100644 index 0000000000000000000000000000000000000000..f09f514613fd44a27450fe7c04cbdf5ebfbe78a8 --- /dev/null +++ b/text_to_speech/utils/text/encoding.py @@ -0,0 +1,9 @@ +import chardet + + +def get_encoding(file): + with open(file, 'rb') as f: + encoding = chardet.detect(f.read())['encoding'] + if encoding == 'GB2312': + encoding = 'GB18030' + return encoding diff --git a/text_to_speech/utils/text/text_encoder.py b/text_to_speech/utils/text/text_encoder.py new file mode 100644 index 0000000000000000000000000000000000000000..5f3fbd457f75a96b542f53668a9f2d289a6e674e --- /dev/null +++ b/text_to_speech/utils/text/text_encoder.py @@ -0,0 +1,272 @@ +import json +import re +import six +from six.moves import range # pylint: disable=redefined-builtin + +PAD = "" +EOS = "" +UNK = "" +SEG = "|" +PUNCS = '!,.?;:' +RESERVED_TOKENS = [PAD, EOS, UNK] +NUM_RESERVED_TOKENS = len(RESERVED_TOKENS) +PAD_ID = RESERVED_TOKENS.index(PAD) # Normally 0 +EOS_ID = RESERVED_TOKENS.index(EOS) # Normally 1 +UNK_ID = RESERVED_TOKENS.index(UNK) # Normally 2 + +if six.PY2: + RESERVED_TOKENS_BYTES = RESERVED_TOKENS +else: + RESERVED_TOKENS_BYTES = [bytes(PAD, "ascii"), bytes(EOS, "ascii")] + +# Regular expression for unescaping token strings. 
+# '\u' is converted to '_' +# '\\' is converted to '\' +# '\213;' is converted to unichr(213) +_UNESCAPE_REGEX = re.compile(r"\\u|\\\\|\\([0-9]+);") +_ESCAPE_CHARS = set(u"\\_u;0123456789") + + +def strip_ids(ids, ids_to_strip): + """Strip ids_to_strip from the end ids.""" + ids = list(ids) + while ids and ids[-1] in ids_to_strip: + ids.pop() + return ids + + +class TextEncoder(object): + """Base class for converting from ints to/from human readable strings.""" + + def __init__(self, num_reserved_ids=NUM_RESERVED_TOKENS): + self._num_reserved_ids = num_reserved_ids + + @property + def num_reserved_ids(self): + return self._num_reserved_ids + + def encode(self, s): + """Transform a human-readable string into a sequence of int ids. + + The ids should be in the range [num_reserved_ids, vocab_size). Ids [0, + num_reserved_ids) are reserved. + + EOS is not appended. + + Args: + s: human-readable string to be converted. + + Returns: + ids: list of integers + """ + return [int(w) + self._num_reserved_ids for w in s.split()] + + def decode(self, ids, strip_extraneous=False): + """Transform a sequence of int ids into a human-readable string. + + EOS is not expected in ids. + + Args: + ids: list of integers to be converted. + strip_extraneous: bool, whether to strip off extraneous tokens + (EOS and PAD). + + Returns: + s: human-readable string. + """ + if strip_extraneous: + ids = strip_ids(ids, list(range(self._num_reserved_ids or 0))) + return " ".join(self.decode_list(ids)) + + def decode_list(self, ids): + """Transform a sequence of int ids into a their string versions. + + This method supports transforming individual input/output ids to their + string versions so that sequence to/from text conversions can be visualized + in a human readable format. + + Args: + ids: list of integers to be converted. + + Returns: + strs: list of human-readable string. + """ + decoded_ids = [] + for id_ in ids: + if 0 <= id_ < self._num_reserved_ids: + decoded_ids.append(RESERVED_TOKENS[int(id_)]) + else: + decoded_ids.append(id_ - self._num_reserved_ids) + return [str(d) for d in decoded_ids] + + @property + def vocab_size(self): + raise NotImplementedError() + + +class TokenTextEncoder(TextEncoder): + """Encoder based on a user-supplied vocabulary (file or list).""" + + def __init__(self, + vocab_filename, + reverse=False, + vocab_list=None, + replace_oov=None, + num_reserved_ids=NUM_RESERVED_TOKENS): + """Initialize from a file or list, one token per line. + + Handling of reserved tokens works as follows: + - When initializing from a list, we add reserved tokens to the vocab. + - When initializing from a file, we do not add reserved tokens to the vocab. + - When saving vocab files, we save reserved tokens to the file. + + Args: + vocab_filename: If not None, the full filename to read vocab from. If this + is not None, then vocab_list should be None. + reverse: Boolean indicating if tokens should be reversed during encoding + and decoding. + vocab_list: If not None, a list of elements of the vocabulary. If this is + not None, then vocab_filename should be None. + replace_oov: If not None, every out-of-vocabulary token seen when + encoding will be replaced by this string (which must be in vocab). + num_reserved_ids: Number of IDs to save for reserved tokens like . 
+ """ + super(TokenTextEncoder, self).__init__(num_reserved_ids=num_reserved_ids) + self._reverse = reverse + self._replace_oov = replace_oov + if vocab_filename: + self._init_vocab_from_file(vocab_filename) + else: + assert vocab_list is not None + self._init_vocab_from_list(vocab_list) + self.pad_index = self.token_to_id[PAD] + self.eos_index = self.token_to_id[EOS] + self.unk_index = self.token_to_id[UNK] + self.seg_index = self.token_to_id[SEG] if SEG in self.token_to_id else self.eos_index + + def encode(self, s): + """Converts a space-separated string of tokens to a list of ids.""" + sentence = s + tokens = sentence.strip().split() + if self._replace_oov is not None: + tokens = [t if t in self.token_to_id else self._replace_oov + for t in tokens] + ret = [self.token_to_id[tok] for tok in tokens] + return ret[::-1] if self._reverse else ret + + def decode(self, ids, strip_eos=False, strip_padding=False): + if strip_padding and self.pad() in list(ids): + pad_pos = list(ids).index(self.pad()) + ids = ids[:pad_pos] + if strip_eos and self.eos() in list(ids): + eos_pos = list(ids).index(self.eos()) + ids = ids[:eos_pos] + return " ".join(self.decode_list(ids)) + + def decode_list(self, ids): + seq = reversed(ids) if self._reverse else ids + return [self._safe_id_to_token(i) for i in seq] + + @property + def vocab_size(self): + return len(self.id_to_token) + + def __len__(self): + return self.vocab_size + + def _safe_id_to_token(self, idx): + return self.id_to_token.get(idx, "ID_%d" % idx) + + def _init_vocab_from_file(self, filename): + """Load vocab from a file. + + Args: + filename: The file to load vocabulary from. + """ + with open(filename) as f: + tokens = [token.strip() for token in f.readlines()] + + def token_gen(): + for token in tokens: + yield token + + self._init_vocab(token_gen(), add_reserved_tokens=False) + + def _init_vocab_from_list(self, vocab_list): + """Initialize tokens from a list of tokens. + + It is ok if reserved tokens appear in the vocab list. They will be + removed. The set of tokens in vocab_list should be unique. + + Args: + vocab_list: A list of tokens. + """ + + def token_gen(): + for token in vocab_list: + if token not in RESERVED_TOKENS: + yield token + + self._init_vocab(token_gen()) + + def _init_vocab(self, token_generator, add_reserved_tokens=True): + """Initialize vocabulary with tokens from token_generator.""" + + self.id_to_token = {} + non_reserved_start_index = 0 + + if add_reserved_tokens: + self.id_to_token.update(enumerate(RESERVED_TOKENS)) + non_reserved_start_index = len(RESERVED_TOKENS) + + self.id_to_token.update( + enumerate(token_generator, start=non_reserved_start_index)) + + # _token_to_id is the reverse of _id_to_token + self.token_to_id = dict((v, k) for k, v in six.iteritems(self.id_to_token)) + + def pad(self): + return self.pad_index + + def eos(self): + return self.eos_index + + def unk(self): + return self.unk_index + + def seg(self): + return self.seg_index + + def store_to_file(self, filename): + """Write vocab file to disk. + + Vocab files have one token per line. The file ends in a newline. Reserved + tokens are written to the vocab file as well. + + Args: + filename: Full path of the file to store the vocab to. 
+ """ + with open(filename, "w") as f: + for i in range(len(self.id_to_token)): + f.write(self.id_to_token[i] + "\n") + + def sil_phonemes(self): + return [p for p in self.id_to_token.values() if is_sil_phoneme(p)] + + # add by zhenhiye + def add_new_token(self, new_token): + assert new_token not in list(self.id_to_token.values()) + num_existing_tokens = len(self.id_to_token) + self.id_to_token[num_existing_tokens] = new_token + self.token_to_id = dict((v, k) for k, v in six.iteritems(self.id_to_token)) + print(f"Added {new_token} into the token dict!") + +def build_token_encoder(token_list_file): + token_list = json.load(open(token_list_file)) + return TokenTextEncoder(None, vocab_list=token_list, replace_oov='') + + +def is_sil_phoneme(p): + return p == '' or not p[0].isalpha() + # for aishell_notone_sing + # return p == '' or not p[0].isalpha() or p == 'breathe' diff --git a/text_to_speech/utils/text/text_norm.py b/text_to_speech/utils/text/text_norm.py new file mode 100644 index 0000000000000000000000000000000000000000..863c2fb235e209f25cce954ec9b585cb6fe13c96 --- /dev/null +++ b/text_to_speech/utils/text/text_norm.py @@ -0,0 +1,797 @@ +# coding=utf-8 +# Authors: +# 2019.5 Zhiyang Zhou (https://github.com/Joee1995/chn_text_norm.git) +# 2019.9 Jiayu DU +# +# requirements: +# - python 3.X +# notes: python 2.X WILL fail or produce misleading results + +import sys, os, argparse, codecs, string, re + +# ================================================================================ # +# basic constant +# ================================================================================ # +CHINESE_DIGIS = u'零一二三四五六七八九' +BIG_CHINESE_DIGIS_SIMPLIFIED = u'零壹贰叁肆伍陆柒捌玖' +BIG_CHINESE_DIGIS_TRADITIONAL = u'零壹貳參肆伍陸柒捌玖' +SMALLER_BIG_CHINESE_UNITS_SIMPLIFIED = u'十百千万' +SMALLER_BIG_CHINESE_UNITS_TRADITIONAL = u'拾佰仟萬' +LARGER_CHINESE_NUMERING_UNITS_SIMPLIFIED = u'亿兆京垓秭穰沟涧正载' +LARGER_CHINESE_NUMERING_UNITS_TRADITIONAL = u'億兆京垓秭穰溝澗正載' +SMALLER_CHINESE_NUMERING_UNITS_SIMPLIFIED = u'十百千万' +SMALLER_CHINESE_NUMERING_UNITS_TRADITIONAL = u'拾佰仟萬' + +ZERO_ALT = u'〇' +ONE_ALT = u'幺' +TWO_ALTS = [u'两', u'兩'] + +POSITIVE = [u'正', u'正'] +NEGATIVE = [u'负', u'負'] +POINT = [u'点', u'點'] +# PLUS = [u'加', u'加'] +# SIL = [u'杠', u'槓'] + +# 中文数字系统类型 +NUMBERING_TYPES = ['low', 'mid', 'high'] + +CURRENCY_NAMES = '(人民币|美元|日元|英镑|欧元|马克|法郎|加拿大元|澳元|港币|先令|芬兰马克|爱尔兰镑|' \ + '里拉|荷兰盾|埃斯库多|比塞塔|印尼盾|林吉特|新西兰元|比索|卢布|新加坡元|韩元|泰铢)' +CURRENCY_UNITS = '((亿|千万|百万|万|千|百)|(亿|千万|百万|万|千|百|)元|(亿|千万|百万|万|千|百|)块|角|毛|分)' +COM_QUANTIFIERS = '(匹|张|座|回|场|尾|条|个|首|阙|阵|网|炮|顶|丘|棵|只|支|袭|辆|挑|担|颗|壳|窠|曲|墙|群|腔|' \ + '砣|座|客|贯|扎|捆|刀|令|打|手|罗|坡|山|岭|江|溪|钟|队|单|双|对|出|口|头|脚|板|跳|枝|件|贴|' \ + '针|线|管|名|位|身|堂|课|本|页|家|户|层|丝|毫|厘|分|钱|两|斤|担|铢|石|钧|锱|忽|(千|毫|微)克|' \ + '毫|厘|分|寸|尺|丈|里|寻|常|铺|程|(千|分|厘|毫|微)米|撮|勺|合|升|斗|石|盘|碗|碟|叠|桶|笼|盆|' \ + '盒|杯|钟|斛|锅|簋|篮|盘|桶|罐|瓶|壶|卮|盏|箩|箱|煲|啖|袋|钵|年|月|日|季|刻|时|周|天|秒|分|旬|' \ + '纪|岁|世|更|夜|春|夏|秋|冬|代|伏|辈|丸|泡|粒|颗|幢|堆|条|根|支|道|面|片|张|颗|块)' + +# punctuation information are based on Zhon project (https://github.com/tsroten/zhon.git) +CHINESE_PUNC_STOP = '!?。。' +CHINESE_PUNC_NON_STOP = '"#$%&'()*+,-/:;<=>@[\]^_`{|}~⦅⦆「」、、〃《》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘’‛“”„‟…‧﹏' +CHINESE_PUNC_LIST = CHINESE_PUNC_STOP + CHINESE_PUNC_NON_STOP + + +# ================================================================================ # +# basic class +# ================================================================================ # +class ChineseChar(object): + """ + 中文字符 + 每个字符对应简体和繁体, + e.g. 
简体 = '负', 繁体 = '負' + 转换时可转换为简体或繁体 + """ + + def __init__(self, simplified, traditional): + self.simplified = simplified + self.traditional = traditional + # self.__repr__ = self.__str__ + + def __str__(self): + return self.simplified or self.traditional or None + + def __repr__(self): + return self.__str__() + + +class ChineseNumberUnit(ChineseChar): + """ + 中文数字/数位字符 + 每个字符除繁简体外还有一个额外的大写字符 + e.g. '陆' 和 '陸' + """ + + def __init__(self, power, simplified, traditional, big_s, big_t): + super(ChineseNumberUnit, self).__init__(simplified, traditional) + self.power = power + self.big_s = big_s + self.big_t = big_t + + def __str__(self): + return '10^{}'.format(self.power) + + @classmethod + def create(cls, index, value, numbering_type=NUMBERING_TYPES[1], small_unit=False): + + if small_unit: + return ChineseNumberUnit(power=index + 1, + simplified=value[0], traditional=value[1], big_s=value[1], big_t=value[1]) + elif numbering_type == NUMBERING_TYPES[0]: + return ChineseNumberUnit(power=index + 8, + simplified=value[0], traditional=value[1], big_s=value[0], big_t=value[1]) + elif numbering_type == NUMBERING_TYPES[1]: + return ChineseNumberUnit(power=(index + 2) * 4, + simplified=value[0], traditional=value[1], big_s=value[0], big_t=value[1]) + elif numbering_type == NUMBERING_TYPES[2]: + return ChineseNumberUnit(power=pow(2, index + 3), + simplified=value[0], traditional=value[1], big_s=value[0], big_t=value[1]) + else: + raise ValueError( + 'Counting type should be in {0} ({1} provided).'.format(NUMBERING_TYPES, numbering_type)) + + +class ChineseNumberDigit(ChineseChar): + """ + 中文数字字符 + """ + + def __init__(self, value, simplified, traditional, big_s, big_t, alt_s=None, alt_t=None): + super(ChineseNumberDigit, self).__init__(simplified, traditional) + self.value = value + self.big_s = big_s + self.big_t = big_t + self.alt_s = alt_s + self.alt_t = alt_t + + def __str__(self): + return str(self.value) + + @classmethod + def create(cls, i, v): + return ChineseNumberDigit(i, v[0], v[1], v[2], v[3]) + + +class ChineseMath(ChineseChar): + """ + 中文数位字符 + """ + + def __init__(self, simplified, traditional, symbol, expression=None): + super(ChineseMath, self).__init__(simplified, traditional) + self.symbol = symbol + self.expression = expression + self.big_s = simplified + self.big_t = traditional + + +CC, CNU, CND, CM = ChineseChar, ChineseNumberUnit, ChineseNumberDigit, ChineseMath + + +class NumberSystem(object): + """ + 中文数字系统 + """ + pass + + +class MathSymbol(object): + """ + 用于中文数字系统的数学符号 (繁/简体), e.g. + positive = ['正', '正'] + negative = ['负', '負'] + point = ['点', '點'] + """ + + def __init__(self, positive, negative, point): + self.positive = positive + self.negative = negative + self.point = point + + def __iter__(self): + for v in self.__dict__.values(): + yield v + + +# class OtherSymbol(object): +# """ +# 其他符号 +# """ +# +# def __init__(self, sil): +# self.sil = sil +# +# def __iter__(self): +# for v in self.__dict__.values(): +# yield v + + +# ================================================================================ # +# basic utils +# ================================================================================ # +def create_system(numbering_type=NUMBERING_TYPES[1]): + """ + 根据数字系统类型返回创建相应的数字系统,默认为 mid + NUMBERING_TYPES = ['low', 'mid', 'high']: 中文数字系统类型 + low: '兆' = '亿' * '十' = $10^{9}$, '京' = '兆' * '十', etc. + mid: '兆' = '亿' * '万' = $10^{12}$, '京' = '兆' * '万', etc. + high: '兆' = '亿' * '亿' = $10^{16}$, '京' = '兆' * '兆', etc. 
+ 返回对应的数字系统 + """ + + # chinese number units of '亿' and larger + all_larger_units = zip( + LARGER_CHINESE_NUMERING_UNITS_SIMPLIFIED, LARGER_CHINESE_NUMERING_UNITS_TRADITIONAL) + larger_units = [CNU.create(i, v, numbering_type, False) + for i, v in enumerate(all_larger_units)] + # chinese number units of '十, 百, 千, 万' + all_smaller_units = zip( + SMALLER_CHINESE_NUMERING_UNITS_SIMPLIFIED, SMALLER_CHINESE_NUMERING_UNITS_TRADITIONAL) + smaller_units = [CNU.create(i, v, small_unit=True) + for i, v in enumerate(all_smaller_units)] + # digis + chinese_digis = zip(CHINESE_DIGIS, CHINESE_DIGIS, + BIG_CHINESE_DIGIS_SIMPLIFIED, BIG_CHINESE_DIGIS_TRADITIONAL) + digits = [CND.create(i, v) for i, v in enumerate(chinese_digis)] + digits[0].alt_s, digits[0].alt_t = ZERO_ALT, ZERO_ALT + digits[1].alt_s, digits[1].alt_t = ONE_ALT, ONE_ALT + digits[2].alt_s, digits[2].alt_t = TWO_ALTS[0], TWO_ALTS[1] + + # symbols + positive_cn = CM(POSITIVE[0], POSITIVE[1], '+', lambda x: x) + negative_cn = CM(NEGATIVE[0], NEGATIVE[1], '-', lambda x: -x) + point_cn = CM(POINT[0], POINT[1], '.', lambda x, + y: float(str(x) + '.' + str(y))) + # sil_cn = CM(SIL[0], SIL[1], '-', lambda x, y: float(str(x) + '-' + str(y))) + system = NumberSystem() + system.units = smaller_units + larger_units + system.digits = digits + system.math = MathSymbol(positive_cn, negative_cn, point_cn) + # system.symbols = OtherSymbol(sil_cn) + return system + + +def chn2num(chinese_string, numbering_type=NUMBERING_TYPES[1]): + def get_symbol(char, system): + for u in system.units: + if char in [u.traditional, u.simplified, u.big_s, u.big_t]: + return u + for d in system.digits: + if char in [d.traditional, d.simplified, d.big_s, d.big_t, d.alt_s, d.alt_t]: + return d + for m in system.math: + if char in [m.traditional, m.simplified]: + return m + + def string2symbols(chinese_string, system): + int_string, dec_string = chinese_string, '' + for p in [system.math.point.simplified, system.math.point.traditional]: + if p in chinese_string: + int_string, dec_string = chinese_string.split(p) + break + return [get_symbol(c, system) for c in int_string], \ + [get_symbol(c, system) for c in dec_string] + + def correct_symbols(integer_symbols, system): + """ + 一百八 to 一百八十 + 一亿一千三百万 to 一亿 一千万 三百万 + """ + + if integer_symbols and isinstance(integer_symbols[0], CNU): + if integer_symbols[0].power == 1: + integer_symbols = [system.digits[1]] + integer_symbols + + if len(integer_symbols) > 1: + if isinstance(integer_symbols[-1], CND) and isinstance(integer_symbols[-2], CNU): + integer_symbols.append( + CNU(integer_symbols[-2].power - 1, None, None, None, None)) + + result = [] + unit_count = 0 + for s in integer_symbols: + if isinstance(s, CND): + result.append(s) + unit_count = 0 + elif isinstance(s, CNU): + current_unit = CNU(s.power, None, None, None, None) + unit_count += 1 + + if unit_count == 1: + result.append(current_unit) + elif unit_count > 1: + for i in range(len(result)): + if isinstance(result[-i - 1], CNU) and result[-i - 1].power < current_unit.power: + result[-i - 1] = CNU(result[-i - 1].power + + current_unit.power, None, None, None, None) + return result + + def compute_value(integer_symbols): + """ + Compute the value. + When current unit is larger than previous unit, current unit * all previous units will be used as all previous units. + e.g. 
'两千万' = 2000 * 10000 not 2000 + 10000 + """ + value = [0] + last_power = 0 + for s in integer_symbols: + if isinstance(s, CND): + value[-1] = s.value + elif isinstance(s, CNU): + value[-1] *= pow(10, s.power) + if s.power > last_power: + value[:-1] = list(map(lambda v: v * + pow(10, s.power), value[:-1])) + last_power = s.power + value.append(0) + return sum(value) + + system = create_system(numbering_type) + int_part, dec_part = string2symbols(chinese_string, system) + int_part = correct_symbols(int_part, system) + int_str = str(compute_value(int_part)) + dec_str = ''.join([str(d.value) for d in dec_part]) + if dec_part: + return '{0}.{1}'.format(int_str, dec_str) + else: + return int_str + + +def num2chn(number_string, numbering_type=NUMBERING_TYPES[1], big=False, + traditional=False, alt_zero=False, alt_one=False, alt_two=True, + use_zeros=True, use_units=True): + def get_value(value_string, use_zeros=True): + + striped_string = value_string.lstrip('0') + + # record nothing if all zeros + if not striped_string: + return [] + + # record one digits + elif len(striped_string) == 1: + if use_zeros and len(value_string) != len(striped_string): + return [system.digits[0], system.digits[int(striped_string)]] + else: + return [system.digits[int(striped_string)]] + + # recursively record multiple digits + else: + result_unit = next(u for u in reversed( + system.units) if u.power < len(striped_string)) + result_string = value_string[:-result_unit.power] + return get_value(result_string) + [result_unit] + get_value(striped_string[-result_unit.power:]) + + system = create_system(numbering_type) + + int_dec = number_string.split('.') + if len(int_dec) == 1: + int_string = int_dec[0] + dec_string = "" + elif len(int_dec) == 2: + int_string = int_dec[0] + dec_string = int_dec[1] + else: + raise ValueError( + "invalid input num string with more than one dot: {}".format(number_string)) + + if use_units and len(int_string) > 1: + result_symbols = get_value(int_string) + else: + result_symbols = [system.digits[int(c)] for c in int_string] + dec_symbols = [system.digits[int(c)] for c in dec_string] + if dec_string: + result_symbols += [system.math.point] + dec_symbols + + if alt_two: + liang = CND(2, system.digits[2].alt_s, system.digits[2].alt_t, + system.digits[2].big_s, system.digits[2].big_t) + for i, v in enumerate(result_symbols): + if isinstance(v, CND) and v.value == 2: + next_symbol = result_symbols[i + + 1] if i < len(result_symbols) - 1 else None + previous_symbol = result_symbols[i - 1] if i > 0 else None + if isinstance(next_symbol, CNU) and isinstance(previous_symbol, (CNU, type(None))): + if next_symbol.power != 1 and ((previous_symbol is None) or (previous_symbol.power != 1)): + result_symbols[i] = liang + + # if big is True, '两' will not be used and `alt_two` has no impact on output + if big: + attr_name = 'big_' + if traditional: + attr_name += 't' + else: + attr_name += 's' + else: + if traditional: + attr_name = 'traditional' + else: + attr_name = 'simplified' + + result = ''.join([getattr(s, attr_name) for s in result_symbols]) + + # if not use_zeros: + # result = result.strip(getattr(system.digits[0], attr_name)) + + if alt_zero: + result = result.replace( + getattr(system.digits[0], attr_name), system.digits[0].alt_s) + + if alt_one: + result = result.replace( + getattr(system.digits[1], attr_name), system.digits[1].alt_s) + + for i, p in enumerate(POINT): + if result.startswith(p): + return CHINESE_DIGIS[0] + result + + # ^10, 11, .., 19 + if len(result) >= 2 and result[1] in 
[SMALLER_CHINESE_NUMERING_UNITS_SIMPLIFIED[0], + SMALLER_CHINESE_NUMERING_UNITS_TRADITIONAL[0]] and \ + result[0] in [CHINESE_DIGIS[1], BIG_CHINESE_DIGIS_SIMPLIFIED[1], BIG_CHINESE_DIGIS_TRADITIONAL[1]]: + result = result[1:] + + return result + + +# ================================================================================ # +# different types of rewriters +# ================================================================================ # +class Cardinal: + """ + CARDINAL类 + """ + + def __init__(self, cardinal=None, chntext=None): + self.cardinal = cardinal + self.chntext = chntext + + def chntext2cardinal(self): + return chn2num(self.chntext) + + def cardinal2chntext(self): + return num2chn(self.cardinal) + + +class Digit: + """ + DIGIT类 + """ + + def __init__(self, digit=None, chntext=None): + self.digit = digit + self.chntext = chntext + + # def chntext2digit(self): + # return chn2num(self.chntext) + + def digit2chntext(self): + return num2chn(self.digit, alt_two=False, use_units=False) + + +class TelePhone: + """ + TELEPHONE类 + """ + + def __init__(self, telephone=None, raw_chntext=None, chntext=None): + self.telephone = telephone + self.raw_chntext = raw_chntext + self.chntext = chntext + + # def chntext2telephone(self): + # sil_parts = self.raw_chntext.split('') + # self.telephone = '-'.join([ + # str(chn2num(p)) for p in sil_parts + # ]) + # return self.telephone + + def telephone2chntext(self, fixed=False): + + if fixed: + sil_parts = self.telephone.split('-') + self.raw_chntext = ''.join([ + num2chn(part, alt_two=False, use_units=False) for part in sil_parts + ]) + self.chntext = self.raw_chntext.replace('', '') + else: + sp_parts = self.telephone.strip('+').split() + self.raw_chntext = ''.join([ + num2chn(part, alt_two=False, use_units=False) for part in sp_parts + ]) + self.chntext = self.raw_chntext.replace('', '') + return self.chntext + + +class Fraction: + """ + FRACTION类 + """ + + def __init__(self, fraction=None, chntext=None): + self.fraction = fraction + self.chntext = chntext + + def chntext2fraction(self): + denominator, numerator = self.chntext.split('分之') + return chn2num(numerator) + '/' + chn2num(denominator) + + def fraction2chntext(self): + numerator, denominator = self.fraction.split('/') + return num2chn(denominator) + '分之' + num2chn(numerator) + + +class Date: + """ + DATE类 + """ + + def __init__(self, date=None, chntext=None): + self.date = date + self.chntext = chntext + + # def chntext2date(self): + # chntext = self.chntext + # try: + # year, other = chntext.strip().split('年', maxsplit=1) + # year = Digit(chntext=year).digit2chntext() + '年' + # except ValueError: + # other = chntext + # year = '' + # if other: + # try: + # month, day = other.strip().split('月', maxsplit=1) + # month = Cardinal(chntext=month).chntext2cardinal() + '月' + # except ValueError: + # day = chntext + # month = '' + # if day: + # day = Cardinal(chntext=day[:-1]).chntext2cardinal() + day[-1] + # else: + # month = '' + # day = '' + # date = year + month + day + # self.date = date + # return self.date + + def date2chntext(self): + date = self.date + try: + year, other = date.strip().split('年', 1) + year = Digit(digit=year).digit2chntext() + '年' + except ValueError: + other = date + year = '' + if other: + try: + month, day = other.strip().split('月', 1) + month = Cardinal(cardinal=month).cardinal2chntext() + '月' + except ValueError: + day = date + month = '' + if day: + day = Cardinal(cardinal=day[:-1]).cardinal2chntext() + day[-1] + else: + month = '' + day = '' + chntext = year 
+
+
+class Money:
+    """
+    MONEY class
+    """
+
+    def __init__(self, money=None, chntext=None):
+        self.money = money
+        self.chntext = chntext
+
+    # def chntext2money(self):
+    #     return self.money
+
+    def money2chntext(self):
+        money = self.money
+        pattern = re.compile(r'(\d+(\.\d+)?)')
+        matchers = pattern.findall(money)
+        if matchers:
+            for matcher in matchers:
+                money = money.replace(matcher[0], Cardinal(cardinal=matcher[0]).cardinal2chntext())
+        self.chntext = money
+        return self.chntext
+
+
+class Percentage:
+    """
+    PERCENTAGE class
+    """
+
+    def __init__(self, percentage=None, chntext=None):
+        self.percentage = percentage
+        self.chntext = chntext
+
+    def chntext2percentage(self):
+        return chn2num(self.chntext.strip().strip('百分之')) + '%'
+
+    def percentage2chntext(self):
+        return '百分之' + num2chn(self.percentage.strip().strip('%'))
+
+
+# ================================================================================ #
+#                                NSW Normalizer                                     #
+# ================================================================================ #
+class NSWNormalizer:
+    def __init__(self, raw_text):
+        self.raw_text = '^' + raw_text + '$'
+        self.norm_text = ''
+
+    def _particular(self):
+        text = self.norm_text
+        pattern = re.compile(r"(([a-zA-Z]+)二([a-zA-Z]+))")
+        matchers = pattern.findall(text)
+        if matchers:
+            # print('particular')
+            for matcher in matchers:
+                text = text.replace(matcher[0], matcher[1] + '2' + matcher[2], 1)
+        self.norm_text = text
+        return self.norm_text
+
+    def normalize(self, remove_punc=True):
+        text = self.raw_text
+
+        # Normalize dates
+        pattern = re.compile(r"\D+((([089]\d|(19|20)\d{2})年)?(\d{1,2}月(\d{1,2}[日号])?)?)")
+        matchers = pattern.findall(text)
+        if matchers:
+            # print('date')
+            for matcher in matchers:
+                text = text.replace(matcher[0], Date(date=matcher[0]).date2chntext(), 1)
+
+        # Normalize money amounts
+        pattern = re.compile(r"\D+((\d+(\.\d+)?)[多余几]?" + CURRENCY_UNITS + r"(\d" + CURRENCY_UNITS + r"?)?)")
+        matchers = pattern.findall(text)
+        if matchers:
+            # print('money')
+            for matcher in matchers:
+                text = text.replace(matcher[0], Money(money=matcher[0]).money2chntext(), 1)
+
+        # Normalize landline / mobile phone numbers
+        # Mobile numbers
+        # http://www.jihaoba.com/news/show/13680
+        # China Mobile: 139, 138, 137, 136, 135, 134, 159, 158, 157, 150, 151, 152, 188, 187, 182, 183, 184, 178, 198
+        # China Unicom: 130, 131, 132, 156, 155, 186, 185, 176
+        # China Telecom: 133, 153, 189, 180, 181, 177
+        pattern = re.compile(r"\D((\+?86 ?)?1([38]\d|5[0-35-9]|7[678]|9[89])\d{8})\D")
+        matchers = pattern.findall(text)
+        if matchers:
+            # print('telephone')
+            for matcher in matchers:
+                text = text.replace(matcher[0], TelePhone(telephone=matcher[0]).telephone2chntext(), 1)
+        # Landline numbers
+        pattern = re.compile(r"\D((0(10|2[0-9]|[3-9]\d{2})-?)?[1-9]\d{6,7})\D")
+        matchers = pattern.findall(text)
+        if matchers:
+            # print('fixed telephone')
+            for matcher in matchers:
+                text = text.replace(matcher[0], TelePhone(telephone=matcher[0]).telephone2chntext(fixed=True), 1)
+
+        # Normalize fractions
+        pattern = re.compile(r"(\d+/\d+)")
+        matchers = pattern.findall(text)
+        if matchers:
+            # print('fraction')
+            for matcher in matchers:
+                text = text.replace(matcher, Fraction(fraction=matcher).fraction2chntext(), 1)
+
+        # Normalize percentages
+        text = text.replace('％', '%')
+        pattern = re.compile(r"(\d+(\.\d+)?%)")
+        matchers = pattern.findall(text)
+        if matchers:
+            # print('percentage')
+            for matcher in matchers:
+                text = text.replace(matcher[0], Percentage(percentage=matcher[0]).percentage2chntext(), 1)
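+
+        # The remaining rules below handle bare numbers in decreasing order of
+        # specificity: number + quantifier, then decimals, then long digit strings
+        # (read digit by digit), and finally any leftover integers as cardinals.
+        # Each match is rewritten at most once, so earlier rules take precedence.
+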
+        # Normalize plain numbers followed by quantifiers
+        pattern = re.compile(r"(\d+(\.\d+)?)[多余几]?" + COM_QUANTIFIERS)
+        matchers = pattern.findall(text)
+        if matchers:
+            # print('cardinal+quantifier')
+            for matcher in matchers:
+                text = text.replace(matcher[0], Cardinal(cardinal=matcher[0]).cardinal2chntext(), 1)
+
+        # Normalize decimals
+        pattern = re.compile(r"(\d+\.\d+)")
+        matchers = pattern.findall(text)
+        if matchers:
+            # print('cardinal')
+            for matcher in matchers:
+                text = text.replace(matcher, Cardinal(cardinal=matcher).cardinal2chntext(), 1)
+
+        # Normalize long digit strings (IDs, codes), read digit by digit
+        pattern = re.compile(r"(\d{4,32})")
+        matchers = pattern.findall(text)
+        if matchers:
+            # print('digit')
+            for matcher in matchers:
+                text = text.replace(matcher, Digit(digit=matcher).digit2chntext(), 1)
+
+        # Normalize any remaining numbers as cardinals
+        pattern = re.compile(r"(\d+(\.\d+)?)")
+        matchers = pattern.findall(text)
+        if matchers:
+            # print('cardinal')
+            for matcher in matchers:
+                text = text.replace(matcher[0], Cardinal(cardinal=matcher[0]).cardinal2chntext(), 1)
+
+        self.norm_text = text
+        self._particular()
+
+        text = self.norm_text.lstrip('^').rstrip('$')
+        if remove_punc:
+            # Punctuation removal
+            old_chars = CHINESE_PUNC_LIST + string.punctuation  # includes all CN and EN punctuations
+            new_chars = ' ' * len(old_chars)
+            del_chars = ''
+            text = text.translate(str.maketrans(old_chars, new_chars, del_chars))
+        return text
+
+
+def nsw_test_case(raw_text):
+    print('I:' + raw_text)
+    print('O:' + NSWNormalizer(raw_text).normalize())
+    print('')
+
+
+def nsw_test():
+    nsw_test_case('固话:0595-23865596或者23880880。')
+    nsw_test_case('手机:+86 19859213959或者15659451527。')
+    nsw_test_case('分数:32477/76391。')
+    nsw_test_case('百分数:80.03%。')
+    nsw_test_case('编号:31520181154418。')
+    nsw_test_case('纯数:2983.07克或12345.60米。')
+    nsw_test_case('日期:1999年2月20日或09年3月15号。')
+    nsw_test_case('金钱:12块5,34.5元,20.1万, 40多块钱')
+    nsw_test_case('特殊:O2O或B2C。')
+    nsw_test_case('3456万吨')
+    nsw_test_case('2938478321947个')
+    nsw_test_case('938')
+    nsw_test_case('今天吃了115个小笼包231个馒头')
+    nsw_test_case('有62%的概率')
+
+
+if __name__ == '__main__':
+    # nsw_test()
+
+    p = argparse.ArgumentParser()
+    p.add_argument('ifile', help='input filename, assume utf-8 encoding')
+    p.add_argument('ofile', help='output filename')
+    p.add_argument('--to_upper', action='store_true', help='convert to upper case')
+    p.add_argument('--to_lower', action='store_true', help='convert to lower case')
+    p.add_argument('--has_key', action='store_true', help="input text has Kaldi's key as first field.")
+    p.add_argument('--log_interval', type=int, default=10000, help='log interval in number of processed lines')
+    args = p.parse_args()
+
+    ifile = codecs.open(args.ifile, 'r', 'utf8')
+    ofile = codecs.open(args.ofile, 'w+', 'utf8')
+
+    n = 0
+    for l in ifile:
+        key = ''
+        text = ''
+        if args.has_key:
+            cols = l.split(maxsplit=1)
+            key = cols[0]
+            if len(cols) == 2:
+                text = cols[1]
+            else:
+                text = ''
+        else:
+            text = l
+
+        # case conversion
+        if args.to_upper and args.to_lower:
+            sys.stderr.write('text norm: to_upper OR to_lower?\n')
+            sys.exit(1)
+        if args.to_upper:
+            text = text.upper()
+        if args.to_lower:
+            text = text.lower()
+
+        # NSW(Non-Standard-Word) normalization
+        text = NSWNormalizer(text).normalize()
+
+        # write the result
+        if args.has_key:
+            ofile.write(key + '\t' + text)
+        else:
+            ofile.write(text)
+
+        n += 1
+        if n % args.log_interval == 0:
+            sys.stderr.write("text norm: {} lines done.\n".format(n))
+
+    sys.stderr.write("text norm: {} lines done in total.\n".format(n))
+
+    ifile.close()
+    ofile.close()
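Besides the CLI above, the normalizer can be called directly from Python inside a TTS front end. The sketch below is a minimal example, assuming this file is importable as `utils.text_norm` (adjust the import to wherever the file actually lives in the repository); it only uses the NSWNormalizer API defined in the patch.

# minimal usage sketch, not part of the patch
from utils.text_norm import NSWNormalizer  # assumed module path

if __name__ == '__main__':
    raw = '日期:1999年2月20日,价格:12块5,占比62%。'
    # Keep punctuation first to inspect the rewrites, then drop it as a TTS
    # front end typically would (punctuation is replaced by spaces).
    print(NSWNormalizer(raw).normalize(remove_punc=False))
    print(NSWNormalizer(raw).normalize())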