Fabrice-TIERCELIN committed · Commit 4ac3165
Parent(s): f3c4aae
Format choice
Allow the user to choose between wav or mp3
app.py
CHANGED
@@ -7,6 +7,7 @@ from huggingface_hub import snapshot_download
 from models import AudioDiffusion, DDPMScheduler
 from audioldm.audio.stft import TacotronSTFT
 from audioldm.variational_autoencoder import AutoencoderKL
+from pydub import AudioSegment
 from gradio import Markdown
 import spaces
 
@@ -83,18 +84,24 @@ tango.stft.to(device_type)
 tango.model.to(device_type)
 
 @spaces.GPU(duration=120)
-def gradio_generate(prompt, steps, guidance):
+def gradio_generate(prompt, output_format, steps, guidance):
     output_wave = tango.generate(prompt, steps, guidance)
     # output_filename = f"{prompt.replace(' ', '_')}_{steps}_{guidance}"[:250] + ".wav"
 
-    output_filename_1 = "
-    wavio.write(output_filename_1, output_wave
+    output_filename_1 = "tmp1.wav"
+    wavio.write(output_filename_1, output_wave, rate=16000, sampwidth=2)
+    output_filename_2 = "tmp2.wav"
+    wavio.write(output_filename_2, output_wave, rate=16000, sampwidth=2)
+    output_filename_3 = "tmp3.wav"
+    wavio.write(output_filename_3, output_wave, rate=16000, sampwidth=2)
 
-
-
-
-
-
+    if (output_format == "mp3"):
+        AudioSegment.from_wav("tmp1.wav").export("tmp1.mp3", format = "mp3")
+        output_filename_1 = "tmp1.mp3"
+        AudioSegment.from_wav("tmp2.wav").export("tmp2.mp3", format = "mp3")
+        output_filename_2 = "tmp2.mp3"
+        AudioSegment.from_wav("tmp3.wav").export("tmp3.mp3", format = "mp3")
+        output_filename_3 = "tmp3.mp3"
 
     return [output_filename_1, output_filename_2, output_filename_3]
 
@@ -118,13 +125,14 @@ def gradio_generate(prompt, steps, guidance):
 # <p/>
 # """
 description_text = """
-<p><a href="https://huggingface.co/spaces/declare-lab/tango2/blob/main/app.py?duplicate=true"> <img style="margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a> For faster inference without waiting in queue, you may duplicate the space and upgrade to a GPU in the settings. <br/><br/>
+<p><a href="https://huggingface.co/spaces/declare-lab/tango2-full/blob/main/app.py?duplicate=true"> <img style="margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a> For faster inference without waiting in queue, you may duplicate the space and upgrade to a GPU in the settings. <br/><br/>
 Generate audio using Tango2 by providing a text prompt. Tango2 was built from Tango and was trained on <a href="https://huggingface.co/datasets/declare-lab/audio-alpaca">Audio-alpaca</a>
 <br/><br/> This is the demo for Tango2 for text to audio generation: <a href="https://arxiv.org/abs/2404.09956">Read our paper.</a>
 <p/>
 """
 # Gradio input and output components
 input_text = gr.Textbox(lines=2, label="Prompt")
+output_format = gr.Radio(label = "Output format", info = "The file you can download", choices = ["mp3", "wav"], value = "wav")
 output_audio_1 = gr.Audio(label="Generated Audio #1/3", type="filepath")
 output_audio_2 = gr.Audio(label="Generated Audio #2/3", type="filepath")
 output_audio_3 = gr.Audio(label="Generated Audio #3/3", type="filepath")
@@ -134,7 +142,7 @@ guidance_scale = gr.Slider(minimum=1, maximum=10, value=3, step=0.1, label="Guid
 # Gradio interface
 gr_interface = gr.Interface(
     fn=gradio_generate,
-    inputs=[input_text, denoising_steps, guidance_scale],
+    inputs=[input_text, output_format, denoising_steps, guidance_scale],
     outputs=[output_audio_1, output_audio_2, output_audio_3],
     title="Tango 2: Aligning Diffusion-based Text-to-Audio Generations through Direct Preference Optimization",
     description=description_text,
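
The heart of the change is the wav-first, optional-mp3 flow inside gradio_generate: each of the three takes is written as 16 kHz, 16-bit PCM with wavio, and only re-encoded to MP3 through pydub (which shells out to ffmpeg) when the user picked "mp3". A minimal sketch of that flow, with the repeated per-file code factored into a helper; the helper name write_audio and the closing comment are illustrative, not part of the committed app.py, and ffmpeg must be installed for the MP3 branch.

import wavio
from pydub import AudioSegment

def write_audio(wave, index, output_format):
    # Write one generated wave as tmp<index>.wav (16 kHz, 16-bit samples),
    # matching the rate and sample width used in the commit.
    wav_path = f"tmp{index}.wav"
    wavio.write(wav_path, wave, rate=16000, sampwidth=2)
    if output_format == "mp3":
        # Re-encode with pydub; this requires ffmpeg on the PATH.
        mp3_path = f"tmp{index}.mp3"
        AudioSegment.from_wav(wav_path).export(mp3_path, format="mp3")
        return mp3_path
    return wav_path

# Hypothetical use inside gradio_generate:
# return [write_audio(output_wave, i, output_format) for i in (1, 2, 3)]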
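
On the UI side, the new gr.Radio component is inserted between the prompt textbox and the sliders, and the same position is added to the gradio_generate signature: gr.Interface passes its inputs to the function positionally, so the two orders must match. A self-contained sketch of that wiring with a stub function; the stub and the "Steps" slider range are illustrative, only the guidance slider values are visible in the hunk header above.

import gradio as gr

def fake_generate(prompt, output_format, steps, guidance):
    # Stub: the real Space runs Tango2 here and returns three audio file paths.
    return f"{prompt!r}: {steps} steps, guidance {guidance}, format {output_format}"

demo = gr.Interface(
    fn=fake_generate,
    inputs=[
        gr.Textbox(lines=2, label="Prompt"),
        gr.Radio(choices=["mp3", "wav"], value="wav", label="Output format"),
        gr.Slider(minimum=100, maximum=200, value=200, step=1, label="Steps"),
        gr.Slider(minimum=1, maximum=10, value=3, step=0.1, label="Guidance Scale"),
    ],
    outputs=gr.Textbox(label="Result"),
)

if __name__ == "__main__":
    demo.launch()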