jadechoghari committed
Commit 880499e
1 Parent(s): 133f869
Update audioldm_train/config/mos_as_token/qa_mdt.yaml
audioldm_train/config/mos_as_token/qa_mdt.yaml CHANGED
@@ -8,10 +8,10 @@ base_root: "/content/qa-mdt"
 # TODO: change this with your pretrained path
 # TODO: pretrained path is also needed in "base_root/offset_pretrained_checkpoints.json"
 pretrained:
-  clap_music: "
-  flan_t5: "
-  hifi-gan: "
-  roberta-base: "
+  clap_music: "./qa-mdt/checkpoints/clap_music"
+  flan_t5: "./qa-mdt/checkpoints/flant5"
+  hifi-gan: "./qa-mdt/checkpoints/hifi-gan/checkpoints"
+  roberta-base: "./qa-mdt/checkpoints/robertabase"

 # TODO: lmdb dataset that stores pMOS of the training dataset
 # while in inference, we don't need it !!!
@@ -78,7 +78,7 @@ model:
     target: audioldm_train.modules.latent_encoder.autoencoder.AutoencoderKL
     params:
       # TODO: change it with your VAE checkpoint
-      reload_from_ckpt: "
+      reload_from_ckpt: "./qa-mdt/checkpoints/hifi-gan/checkpoints/vae_mel_16k_64bins.ckpt"
       sampling_rate: *sampling_rate
       batchsize: 1
       monitor: val/rec_loss
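Not part of the commit itself, but since every changed value points at a local checkpoint (and, per the TODO in the config, the same paths must also be mirrored in base_root/offset_pretrained_checkpoints.json), a quick check that the paths resolve can save a failed run. Below is a minimal sketch: it assumes PyYAML is installed, that the config parses with yaml.safe_load, and that the relative "./qa-mdt/..." paths are resolved from the directory you launch training from; the CONFIG constant and iter_paths helper are illustrative names, not part of the qa-mdt codebase.

# Minimal sketch (not part of the commit): verify that the checkpoint paths set in
# qa_mdt.yaml exist on disk before launching training or inference.
import os
import yaml  # PyYAML

CONFIG = "audioldm_train/config/mos_as_token/qa_mdt.yaml"  # illustrative constant

# Keys touched by this commit: the four pretrained components plus the VAE checkpoint.
PATH_KEYS = {"clap_music", "flan_t5", "hifi-gan", "roberta-base", "reload_from_ckpt"}

def iter_paths(node, wanted):
    """Recursively walk the parsed YAML and yield (key, value) for matching string values."""
    if isinstance(node, dict):
        for key, value in node.items():
            if key in wanted and isinstance(value, str):
                yield key, value
            else:
                yield from iter_paths(value, wanted)
    elif isinstance(node, list):
        for item in node:
            yield from iter_paths(item, wanted)

with open(CONFIG) as f:
    cfg = yaml.safe_load(f)

# Relative "./qa-mdt/..." paths are resolved from the current working directory,
# so run this from the same directory you launch training from.
for key, path in iter_paths(cfg, PATH_KEYS):
    status = "ok" if os.path.exists(path) else "MISSING"
    print(f"{key:>16}: {path}  [{status}]")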