import gradio as gr
import json
import torch
import wavio
from tqdm import tqdm
from huggingface_hub import snapshot_download
from models import AudioDiffusion, DDPMScheduler
from audioldm.audio.stft import TacotronSTFT
from audioldm.variational_autoencoder import AutoencoderKL
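

# Tango bundles the three pretrained TANGO components (a VAE over
# mel-spectrogram latents, a TacotronSTFT front-end, and the latent
# diffusion model) behind simple text-to-audio helpers.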
class Tango:
    def __init__(self, name="declare-lab/tango-full-ft-audiocaps", device="cuda:0"):
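        # Fetch the full checkpoint snapshot (configs + weights) from the Hugging Face Hub.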
        path = snapshot_download(repo_id=name)

        vae_config = json.load(open("{}/vae_config.json".format(path)))
        stft_config = json.load(open("{}/stft_config.json".format(path)))
        main_config = json.load(open("{}/main_config.json".format(path)))
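
        # Instantiate each module from its config, then restore its weights.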
        self.vae = AutoencoderKL(**vae_config).to(device)
        self.stft = TacotronSTFT(**stft_config).to(device)
        self.model = AudioDiffusion(**main_config).to(device)

        vae_weights = torch.load("{}/pytorch_model_vae.bin".format(path), map_location=device)
        stft_weights = torch.load("{}/pytorch_model_stft.bin".format(path), map_location=device)
        main_weights = torch.load("{}/pytorch_model_main.bin".format(path), map_location=device)

        self.vae.load_state_dict(vae_weights)
        self.stft.load_state_dict(stft_weights)
        self.model.load_state_dict(main_weights)
        print("Successfully loaded checkpoint from:", name)

        self.vae.eval()
        self.stft.eval()
        self.model.eval()

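        # Load the DDPM noise scheduler from the repo named in the main config.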
        self.scheduler = DDPMScheduler.from_pretrained(main_config["scheduler_name"], subfolder="scheduler")

    def chunks(self, lst, n):
        """ Yield successive n-sized chunks from a list. """
        for i in range(0, len(lst), n):
            yield lst[i:i + n]

    def generate(self, prompt, steps=100, guidance=3, samples=1, disable_progress=True):
        """ Generate audio for a single prompt string. """
        with torch.no_grad():
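            # Denoise in latent space, then decode: latents -> mel spectrogram -> waveform.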
            latents = self.model.inference([prompt], self.scheduler, steps, guidance, samples, disable_progress=disable_progress)
            mel = self.vae.decode_first_stage(latents)
            wave = self.vae.decode_to_waveform(mel)
        return wave[0]

    def generate_for_batch(self, prompts, steps=200, guidance=3, samples=1, batch_size=8, disable_progress=True):
        """ Generate audio for a list of prompt strings. """
        outputs = []
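        # Process prompts in mini-batches to bound peak GPU memory.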
        for k in tqdm(range(0, len(prompts), batch_size)):
            batch = prompts[k: k + batch_size]
            with torch.no_grad():
                latents = self.model.inference(batch, self.scheduler, steps, guidance, samples, disable_progress=disable_progress)
                mel = self.vae.decode_first_stage(latents)
                wave = self.vae.decode_to_waveform(mel)
                outputs += [item for item in wave]
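        # With several samples per prompt, regroup the flat list into per-prompt chunks.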
        if samples == 1:
            return outputs
        else:
            return list(self.chunks(outputs, samples))
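

# Usage sketch for batch generation (not exercised by the app below; assumes
# the checkpoint snapshot downloads successfully):
#     tango = Tango(device="cpu")
#     waves = tango.generate_for_batch(["A dog barking", "A cat meowing"])
#     wavio.write("bark.wav", waves[0], rate=16000, sampwidth=2)
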
# Initialize TANGO
if torch.cuda.is_available():
    tango = Tango()
else:
    tango = Tango(device="cpu")


def gradio_generate(prompt, steps, guidance):
    output_wave = tango.generate(prompt, steps, guidance)
    # output_filename = f"{prompt.replace(' ', '_')}_{steps}_{guidance}"[:250] + ".wav"
    output_filename = "temp.wav"
    wavio.write(output_filename, output_wave, rate=16000, sampwidth=2)
    return output_filename

# description_text = """
# <p><a href="https://huggingface.co/spaces/declare-lab/tango/blob/main/app.py?duplicate=true"> <img style="margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a> For faster inference without waiting in queue, you may duplicate the space and upgrade to a GPU in the settings. <br/><br/>
# Generate audio using TANGO by providing a text prompt.
# <br/><br/>Limitations: TANGO is trained on the small AudioCaps dataset so it may not generate good audio \
# samples related to concepts that it has not seen in training (e.g. singing). For the same reason, TANGO \
# is not always able to finely control its generations over textual control prompts. For example, \
# the generations from TANGO for prompts Chopping tomatoes on a wooden table and Chopping potatoes \
# on a metal table are very similar. \
# <br/><br/>We are currently training another version of TANGO on larger datasets to enhance its generalization, \
# compositional and controllable generation ability.
# <br/><br/>We recommend using a guidance scale of 3. The default number of steps is set to 100. More steps generally lead to better quality of generated audios but will take longer.
# <br/><br/>
# <h1> ChatGPT-enhanced audio generation</h1>
# <br/>
# As TANGO consists of an instruction-tuned LLM, it is able to process complex sound descriptions allowing us to provide more detailed instructions to improve the generation quality.
# For example, ``A boat is moving on the sea'' vs ``The sound of the water lapping against the hull of the boat or splashing as you move through the waves''. The latter is obtained by prompting ChatGPT to explain the sound generated when a boat moves on the sea.
# Using this ChatGPT-generated description of the sound, TANGO provides superior results.
# <p/>
# """
description_text = ""
# Gradio input and output components
input_text = gr.Textbox(lines=2, label="Prompt")
output_audio = gr.Audio(label="Generated Audio", type="filepath")
denoising_steps = gr.Slider(minimum=100, maximum=200, value=100, step=1, label="Steps", interactive=True)
guidance_scale = gr.Slider(minimum=1, maximum=10, value=3, step=0.1, label="Guidance Scale", interactive=True)
# Gradio interface
gr_interface = gr.Interface(
    fn=gradio_generate,
    inputs=[input_text, denoising_steps, guidance_scale],
    outputs=[output_audio],
    title="TANGO: Text to Audio using Instruction-Guided Diffusion",
    description=description_text,
    allow_flagging="never",
    examples=[
        ["A lady is singing a song with a kid"],
        ["The sound of the water lapping against the hull of the boat or splashing as you move through the waves"],
        ["An audience cheering and clapping"],
        ["Rolling thunder with lightning strikes"],
        ["Gentle water stream, birds chirping and sudden gun shot"],
        ["A car engine revving"],
        ["A dog barking"],
        ["A cat meowing"],
        ["Wooden table tapping sound while water pouring"],
        ["Emergency sirens wailing"],
        ["two gunshots followed by birds flying away while chirping"],
        ["Whistling with birds chirping"],
        ["A person snoring"],
        ["Motor vehicles are driving with loud engines and a person whistles"],
        ["People cheering in a stadium while thunder and lightning strikes"],
        ["A helicopter is in flight"],
        ["A dog barking and a man talking and a racing car passes by"],
    ],
    cache_examples=False,
)
# Launch Gradio app
gr_interface.launch()