File size: 2,386 Bytes
6b28a91
 
 
 
c1af806
6b28a91
 
a9c020a
6b28a91
 
 
 
 
 
 
 
 
 
 
 
 
a66e2eb
6b28a91
 
 
 
 
 
 
 
 
 
39a196c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6b28a91
 
39a196c
6b28a91
97a0727
 
4fc6482
 
39a196c
 
4fc6482
 
6b28a91
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
import gradio as gr
import os
import shutil
import spaces
import sys

# we will clone the repo and install the dependencies
# NOTE: Still fixing bugs, not release, do not try :) !
# Runtime pip installs are a Hugging Face Spaces workaround so the pinned
# CUDA/torch stack is in place before qa_mdt is imported below.
# NOTE(review): os.system exit codes are not checked — a failed install
# will only surface later as an ImportError from qa_mdt.
os.system('pip install -r qa_mdt/requirements.txt')
os.system('pip install xformers==0.0.26.post1')
os.system('pip install torchlibrosa==0.0.9 librosa==0.9.2')
os.system('pip install -q pytorch_lightning==2.1.3 torchlibrosa==0.0.9 librosa==0.9.2 ftfy==6.1.1 braceexpand')
os.system('pip install torch==2.3.0+cu121 torchvision==0.18.0+cu121 torchaudio==2.3.0 --index-url https://download.pytorch.org/whl/cu121')

# only then import the necessary modules from qa_mdt
# (this import must happen AFTER the installs above — do not reorder)
from qa_mdt.pipeline import MOSDiffusionPipeline


# Instantiate the pipeline once at module load so every request reuses it.
pipe = MOSDiffusionPipeline()

# this runs the pipeline with user input and saves the output as 'awesome.wav'
@spaces.GPU()
def generate_waveform(description):
    """Generate an audio waveform from a text description.

    Args:
        description: Free-text prompt describing the desired music.

    Returns:
        Path to the generated WAV file ("./awesome.wav") on success.

    Raises:
        gr.Error: If the pipeline finished without producing the file.
    """
    # The pipeline writes its result to ./awesome.wav in the working
    # directory (no path is returned — TODO confirm against qa_mdt).
    pipe(description)

    generated_file_path = "./awesome.wav"

    if os.path.exists(generated_file_path):
        return generated_file_path

    # The original returned a plain error string here, but the output
    # component is gr.Audio, which cannot render a message string.
    # Raising gr.Error makes Gradio show a proper error in the UI.
    raise gr.Error("Failed to generate the waveform.")


intro = """
# 🎶 OpenMusic: AI-Powered Music Diffusion 🎶

![OpenMusic Banner](./banner.png)

Welcome to **OpenMusic**, a next-gen diffusion model designed to generate high-quality audio from text descriptions! 

Simply enter a description of the music you'd like to hear, and our AI will generate it for you.

---

### Powered by:

- [GitHub Repository](https://github.com/ivcylc/qa-mdt) by [@changli](https://github.com/ivcylc) 🎓.
- Introduced in this [Paper](https://arxiv.org/pdf/2405.15863)
- Hugging Face Diffusers Implementation 🧨 (Super easy to use): [Model](https://huggingface.co/jadechoghari/qa_mdt) by [@jadechoghari](https://github.com/jadechoghari) 🤗.

---

"""

# gradio interface: one text prompt in, one downloadable audio clip out.
# BUG FIX: the original passed `description=` twice, which is a
# SyntaxError ("keyword argument repeated") — the file could not even be
# imported. The intro markdown stays as `description`; the usage hint is
# moved to Gradio's `article` slot (rendered below the interface).
iface = gr.Interface(
    fn=generate_waveform,
    description=intro,
    article="Enter a music description, and the model will generate a corresponding audio waveform. Download the output as 'awesome.wav'.",
    inputs=gr.Textbox(lines=2, placeholder="Enter a music description here..."),
    outputs=gr.Audio(label="Download the Music 🎼"),
    examples=[
        ["A modern synthesizer creating futuristic soundscapes."],
        ["Acoustic ballad with heartfelt lyrics and soft piano."],
    ],
    cache_examples=True,
)

# Launch the Gradio app only when run as a script (not on import),
# so tools can import this module without starting the server.
if __name__ == "__main__":
    iface.launch()