jadechoghari committed
Commit 26fcf3a
Parent(s): 7245754

Update audioldm_train/conditional_models.py

audioldm_train/conditional_models.py CHANGED
@@ -4,8 +4,8 @@ sys.path.append("src")
 import torch
 import logging
 import torch.nn as nn
-from audioldm_train.modules.clap.open_clip import create_model
-from audioldm_train.modules.clap.training.data import get_audio_features
+from qa_mdt.audioldm_train.modules.clap.open_clip import create_model
+from qa_mdt.audioldm_train.modules.clap.training.data import get_audio_features
 
 import torchaudio
 from transformers import (
@@ -15,18 +15,18 @@ from transformers import (
     MT5EncoderModel,
 )
 import torch.nn.functional as F
-from audioldm_train.modules.audiomae.AudioMAE import Vanilla_AudioMAE
-from audioldm_train.modules.phoneme_encoder.encoder import TextEncoder
+from qa_mdt.audioldm_train.modules.audiomae.AudioMAE import Vanilla_AudioMAE
+from qa_mdt.audioldm_train.modules.phoneme_encoder.encoder import TextEncoder
 
 from transformers import SpeechT5Processor, AutoTokenizer, GPT2Model, GPT2Tokenizer
 from transformers.models.speecht5.modeling_speecht5 import SpeechT5EncoderWithTextPrenet
 
-from audioldm_train.modules.audiomae.sequence_gen.model import CLAP2AudioMAE
-from audioldm_train.modules.audiomae.sequence_gen.sequence_input import (
+from qa_mdt.audioldm_train.modules.audiomae.sequence_gen.model import CLAP2AudioMAE
+from qa_mdt.audioldm_train.modules.audiomae.sequence_gen.sequence_input import (
     Sequence2AudioMAE,
 )
 import numpy as np
-from audioldm_train.modules.audiomae.sequence_gen.model import Prenet
+from qa_mdt.audioldm_train.modules.audiomae.sequence_gen.model import Prenet
 import json
 with open('offset_pretrained_checkpoints.json', 'r') as config_file:
     config_data = json.load(config_file)
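
The only functional change is re-rooting every audioldm_train import under the qa_mdt package. A minimal sketch of the resulting import pattern, assuming the repository is installed or vendored as a top-level qa_mdt package; the ImportError fallback and the __file__-relative config path are illustrative additions, not part of this commit:

# Sketch only: the qa_mdt-prefixed path is what this commit introduces;
# the ImportError fallback to the old flat layout is a hypothetical convenience.
try:
    from qa_mdt.audioldm_train.modules.clap.open_clip import create_model
except ImportError:
    from audioldm_train.modules.clap.open_clip import create_model

import json
import os

# The committed code opens 'offset_pretrained_checkpoints.json' from the current
# working directory; resolving it next to this file (a hypothetical tweak) would
# let the module be imported from any directory.
_CONFIG_PATH = os.path.join(os.path.dirname(__file__), "offset_pretrained_checkpoints.json")
with open(_CONFIG_PATH, "r") as config_file:
    config_data = json.load(config_file)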
|