Fabrice-TIERCELIN committed
Commit 559b00c • 1 Parent(s): 6cf3804

Automatic device detection

Files changed (1)
  1. app.py +59 -58
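In short: `app.py` stops hard-coding `"cpu"` and instead probes the runtime once with `torch.cuda.is_available()`, keeping a coarse `device_type` for `.to(...)` placement and an explicit `device_selection` for `torch.load(..., map_location=...)`, while inference is wrapped in the `@spaces.GPU` (ZeroGPU) decorator. A minimal, self-contained sketch of the detection pattern; the tensor at the end is purely illustrative and not part of the app:

```python
import torch

# Detect the best available device once and reuse it everywhere,
# mirroring the device_type / device_selection split used in this commit.
if torch.cuda.is_available():
    device_type = "cuda"         # coarse type, e.g. for module.to(device_type)
    device_selection = "cuda:0"  # explicit index, e.g. for torch.load(map_location=...)
else:
    device_type = "cpu"
    device_selection = "cpu"

# Any tensor or module can then be placed on the detected device.
x = torch.zeros(4, device=device_selection)
print(x.device)  # prints cuda:0 when a GPU is visible, otherwise cpu
```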
app.py CHANGED
@@ -2,18 +2,27 @@ import gradio as gr
 import json
 import torch
 import wavio
+import spaces
+
 from tqdm import tqdm
 from huggingface_hub import snapshot_download
 from models import AudioDiffusion, DDPMScheduler
 from audioldm.audio.stft import TacotronSTFT
 from audioldm.variational_autoencoder import AutoencoderKL
 from gradio import Markdown
-import spaces
+
+# Automatic device detection
+if torch.cuda.is_available():
+    device_type = "cuda"
+    device_selection = "cuda:0"
+else:
+    device_type = "cpu"
+    device_selection = "cpu"
 
 class Tango:
-    def __init__(self, name="declare-lab/tango2", device="cpu"):
+    def __init__(self, name = "declare-lab/tango2", device = device_selection):
 
-        path = snapshot_download(repo_id=name)
+        path = snapshot_download(repo_id = name)
 
         vae_config = json.load(open("{}/vae_config.json".format(path)))
         stft_config = json.load(open("{}/stft_config.json".format(path)))
@@ -23,9 +32,9 @@ class Tango:
         self.stft = TacotronSTFT(**stft_config).to(device)
         self.model = AudioDiffusion(**main_config).to(device)
 
-        vae_weights = torch.load("{}/pytorch_model_vae.bin".format(path), map_location=device)
-        stft_weights = torch.load("{}/pytorch_model_stft.bin".format(path), map_location=device)
-        main_weights = torch.load("{}/pytorch_model_main.bin".format(path), map_location=device)
+        vae_weights = torch.load("{}/pytorch_model_vae.bin".format(path), map_location = device)
+        stft_weights = torch.load("{}/pytorch_model_stft.bin".format(path), map_location = device)
+        main_weights = torch.load("{}/pytorch_model_main.bin".format(path), map_location = device)
 
         self.vae.load_state_dict(vae_weights)
         self.stft.load_state_dict(stft_weights)
@@ -37,92 +46,84 @@ class Tango:
         self.stft.eval()
         self.model.eval()
 
-        self.scheduler = DDPMScheduler.from_pretrained(main_config["scheduler_name"], subfolder="scheduler")
+        self.scheduler = DDPMScheduler.from_pretrained(main_config["scheduler_name"], subfolder = "scheduler")
 
     def chunks(self, lst, n):
         """ Yield successive n-sized chunks from a list. """
         for i in range(0, len(lst), n):
             yield lst[i:i + n]
 
-    def generate(self, prompt, steps=100, guidance=3, samples=1, disable_progress=True):
-        """ Genrate audio for a single prompt string. """
+    def generate(self, prompt, steps = 100, guidance = 3, samples = 1, disable_progress = True):
+        """ Generate audio for a single prompt string. """
         with torch.no_grad():
-            latents = self.model.inference([prompt], self.scheduler, steps, guidance, samples, disable_progress=disable_progress)
+            latents = self.model.inference([prompt], self.scheduler, steps, guidance, samples, disable_progress = disable_progress)
             mel = self.vae.decode_first_stage(latents)
             wave = self.vae.decode_to_waveform(mel)
             return wave[0]
 
-    def generate_for_batch(self, prompts, steps=200, guidance=3, samples=1, batch_size=8, disable_progress=True):
-        """ Genrate audio for a list of prompt strings. """
+    def generate_for_batch(self, prompts, steps = 200, guidance = 3, samples = 1, batch_size = 8, disable_progress = True):
+        """ Generate audio for a list of prompt strings. """
         outputs = []
         for k in tqdm(range(0, len(prompts), batch_size)):
-            batch = prompts[k: k+batch_size]
+            batch = prompts[k: k + batch_size]
            with torch.no_grad():
-                latents = self.model.inference(batch, self.scheduler, steps, guidance, samples, disable_progress=disable_progress)
+                latents = self.model.inference(batch, self.scheduler, steps, guidance, samples, disable_progress = disable_progress)
                 mel = self.vae.decode_first_stage(latents)
                 wave = self.vae.decode_to_waveform(mel)
                 outputs += [item for item in wave]
         if samples == 1:
             return outputs
-        else:
-            return list(self.chunks(outputs, samples))
+        return list(self.chunks(outputs, samples))
 
 # Initialize TANGO
 
-tango = Tango(device="cpu")
-tango.vae.to("cpu")
-tango.stft.to("cpu")
-tango.model.to("cpu")
+tango = Tango(device = "cpu")
+tango.vae.to(device_type)
+tango.stft.to(device_type)
+tango.model.to(device_type)
 
-@spaces.GPU(duration=60)
+@spaces.GPU(duration = 60)
 def gradio_generate(prompt, steps, guidance):
     output_wave = tango.generate(prompt, steps, guidance)
     # output_filename = f"{prompt.replace(' ', '_')}_{steps}_{guidance}"[:250] + ".wav"
     output_filename = "temp.wav"
-    wavio.write(output_filename, output_wave, rate=16000, sampwidth=2)
+    wavio.write(output_filename, output_wave, rate = 16000, sampwidth = 2)
 
     return output_filename
 
-# description_text = """
-# <p><a href="https://huggingface.co/spaces/declare-lab/tango/blob/main/app.py?duplicate=true"> <img style="margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a> For faster inference without waiting in queue, you may duplicate the space and upgrade to a GPU in the settings. <br/><br/>
-# Generate audio using TANGO by providing a text prompt.
-# <br/><br/>Limitations: TANGO is trained on the small AudioCaps dataset so it may not generate good audio \
-# samples related to concepts that it has not seen in training (e.g. singing). For the same reason, TANGO \
-# is not always able to finely control its generations over textual control prompts. For example, \
-# the generations from TANGO for prompts Chopping tomatoes on a wooden table and Chopping potatoes \
-# on a metal table are very similar. \
-# <br/><br/>We are currently training another version of TANGO on larger datasets to enhance its generalization, \
-# compositional and controllable generation ability.
-# <br/><br/>We recommend using a guidance scale of 3. The default number of steps is set to 100. More steps generally lead to better quality of generated audios but will take longer.
-# <br/><br/>
-# <h1> ChatGPT-enhanced audio generation</h1>
-# <br/>
-# As TANGO consists of an instruction-tuned LLM, it is able to process complex sound descriptions allowing us to provide more detailed instructions to improve the generation quality.
-# For example, ``A boat is moving on the sea'' vs ``The sound of the water lapping against the hull of the boat or splashing as you move through the waves''. The latter is obtained by prompting ChatGPT to explain the sound generated when a boat moves on the sea.
-# Using this ChatGPT-generated description of the sound, TANGO provides superior results.
-# <p/>
-# """
 description_text = """
-<p><a href="https://huggingface.co/spaces/declare-lab/tango2/blob/main/app.py?duplicate=true"> <img style="margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a> For faster inference without waiting in queue, you may duplicate the space and upgrade to a GPU in the settings. <br/><br/>
-Generate audio using Tango2 by providing a text prompt. Tango2 was built from Tango and was trained on <a href="https://huggingface.co/datasets/declare-lab/audio-alpaca">Audio-alpaca</a>
-<br/><br/> This is the demo for Tango2 for text to audio generation: <a href="https://arxiv.org/abs/2404.09956">Read our paper.</a>
-<p/>
-"""
+<p style="text-align: center;">
+<b><big><big><big>Text-to-Audio</big></big></big></b>
+<br/>Generates an audio file, freely, without account, without watermark, that you can download.
+</p>
+<br/>
+<br/>
+🚀 Powered by <i>Tango 2</i> AI.
+<br/>
+<ul>
+<li>If you need to generate <b>music</b>, I recommend you to use <i>MusicGen</i>,</li>
+</ul>
+<br/>
+🐌 Slow process... Your computer must <b><u>not</u></b> enter into standby mode.<br/>You can duplicate this space on a free account, it works on CPU.<br/>
+<a href='https://huggingface.co/spaces/Fabrice-TIERCELIN/Text-to-Audio?duplicate=true'><img src='https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14'></a>
+<br/>
+⚖️ You can use, modify and share the generated sounds but not for commercial uses.
+"""
 # Gradio input and output components
-input_text = gr.Textbox(lines=2, label="Prompt")
-output_audio = gr.Audio(label="Generated Audio", type="filepath")
-denoising_steps = gr.Slider(minimum=100, maximum=200, value=100, step=1, label="Steps", interactive=True)
-guidance_scale = gr.Slider(minimum=1, maximum=10, value=3, step=0.1, label="Guidance Scale", interactive=True)
+input_text = gr.Textbox(label = "Prompt", value = "Snort of a horse", lines = 2, autofocus = True)
+output_audio = gr.Audio(label = "Generated Audio", type = "filepath")
+denoising_steps = gr.Slider(label = "Steps", minimum = 100, maximum = 200, value = 100, step = 1, interactive = True)
+guidance_scale = gr.Slider(label = "Guidance Scale", minimum = 1, maximum = 10, value = 3, step = 0.1, interactive = True)
 
 # Gradio interface
 gr_interface = gr.Interface(
-    fn=gradio_generate,
-    inputs=[input_text, denoising_steps, guidance_scale],
-    outputs=[output_audio],
-    title="Tango 2: Aligning Diffusion-based Text-to-Audio Generations through Direct Preference Optimization",
-    description=description_text,
-    allow_flagging=False,
-    examples=[
+    fn = gradio_generate,
+    inputs = [input_text, denoising_steps, guidance_scale],
+    outputs = [output_audio],
+    title = "",
+    description = description_text,
+    allow_flagging = False,
+    examples = [
        ["Quiet speech and then and airplane flying away"],
        ["A bicycle peddling on dirt and gravel followed by a man speaking then laughing"],
        ["Ducks quack and water splashes with some animal screeching in the background"],
@@ -147,7 +148,7 @@ gr_interface = gr.Interface(
        ["A helicopter is in flight"],
        ["A dog barking and a man talking and a racing car passes by"],
    ],
-    cache_examples="lazy", # Turn on to cache.
+    cache_examples = "lazy", # Turn on to cache.
 )
 
  # Launch Gradio app
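For context on the `@spaces.GPU(duration = 60)` decorator introduced above: it comes from Hugging Face's `spaces` package (ZeroGPU), which attaches a CUDA device only while a decorated function is running, so module initialisation at import time stays on the CPU. A minimal sketch of the pattern, assuming only the public `gradio` and `spaces` APIs; the `generate` placeholder and its text output are illustrative, not the app's actual `gradio_generate`:

```python
import gradio as gr
import spaces  # Hugging Face package that provides the ZeroGPU decorator

@spaces.GPU(duration=60)  # request a GPU for at most ~60 s per call, as in the commit above
def generate(prompt: str) -> str:
    # Heavy model inference would run here; on ZeroGPU hardware a CUDA
    # device is only guaranteed to be available inside this decorated call.
    return f"(placeholder) generated audio for: {prompt}"

# Wire the decorated function into a simple interface and launch it,
# roughly what the "# Launch Gradio app" section above goes on to do.
gr.Interface(fn=generate, inputs="text", outputs="text").launch()
```

Outside a ZeroGPU Space the decorator is effectively a pass-through, which is why the CPU fallback in the device detection above still works.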