# OpenMusic / app.py
import gradio as gr
import os
import shutil
import spaces
import sys
# Install the qa_mdt dependencies at runtime; this must happen before anything is imported from qa_mdt.
# NOTE: still fixing bugs, not released yet, do not try :)
os.system('pip install -r qa_mdt/requirements.txt')
os.system('pip install xformers==0.0.26.post1')
os.system('pip install torchlibrosa==0.0.9 librosa==0.9.2')
os.system('pip install -q pytorch_lightning==2.1.3 torchlibrosa==0.0.9 librosa==0.9.2 ftfy==6.1.1 braceexpand')
os.system('pip install torch==2.3.0+cu121 torchvision==0.18.0+cu121 torchaudio==2.3.0 --index-url https://download.pytorch.org/whl/cu121')
# only then import the necessary modules from qa_mdt
from qa_mdt.pipeline import MOSDiffusionPipeline
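# Create the pipeline once at module load so every request reuses the same instance.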
pipe = MOSDiffusionPipeline()
# Runs the pipeline on the user's prompt; the pipeline writes its output to './awesome.wav'.
@spaces.GPU()  # allocate a GPU for the duration of each call (Hugging Face ZeroGPU)
def generate_waveform(description):
    pipe(description)
    generated_file_path = "./awesome.wav"
    if os.path.exists(generated_file_path):
        return generated_file_path
    # Raise a Gradio error instead of returning a string that gr.Audio cannot play.
    raise gr.Error("Failed to generate the waveform.")
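# Optional local smoke test (hypothetical prompt; assumes the qa_mdt weights and a GPU are available):
#   generate_waveform("A gentle piano melody with soft strings")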
intro = """
# 🎢 OpenMusic: AI-Powered Music Diffusion 🎢
![OpenMusic Banner](./banner.png)
Welcome to **OpenMusic**, a next-gen diffusion model designed to generate high-quality audio from text descriptions!
Simply enter a description of the music you'd like to hear, and our AI will generate it for you.
---
### Powered by:
- [GitHub Repository](https://github.com/ivcylc/qa-mdt) by [@changli](https://github.com/ivcylc) 🎓.
- Introduced in this [Paper](https://arxiv.org/pdf/2405.15863).
- Hugging Face Diffusers Implementation 🧨 (Super easy to use): [Model](https://huggingface.co/jadechoghari/qa_mdt) by [@jadechoghari](https://github.com/jadechoghari) 🤗.
---
"""
# Gradio interface
iface = gr.Interface(
    fn=generate_waveform,
    inputs=gr.Textbox(lines=2, placeholder="Enter a music description here..."),
    outputs=gr.Audio(label="Download the Music 🎼"),
    description=intro,
    article="Enter a music description, and the model will generate a corresponding audio waveform. Download the output as 'awesome.wav'.",
    examples=[
        ["A modern synthesizer creating futuristic soundscapes."],
        ["Acoustic ballad with heartfelt lyrics and soft piano."],
    ],
    cache_examples=True,  # pre-compute the example outputs at launch
)
# Launch the Gradio app
if __name__ == "__main__":
    iface.launch()