Fabrice-TIERCELIN committed on
Commit
9e23cf9
1 Parent(s): 703e0c3

Try length parameter

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -55,7 +55,7 @@ class Tango:
55
  def generate(self, prompt, steps = 100, guidance = 3, samples = 1, disable_progress = True):
56
  # Generate audio for a single prompt string
57
  with torch.no_grad():
58
- latents = self.model.inference([prompt], self.scheduler, steps, guidance, samples, disable_progress = disable_progress)
59
  mel = self.vae.decode_first_stage(latents)
60
  wave = self.vae.decode_to_waveform(mel)
61
  return wave
@@ -66,7 +66,7 @@ class Tango:
66
  for k in tqdm(range(0, len(prompts), batch_size)):
67
  batch = prompts[k: k + batch_size]
68
  with torch.no_grad():
69
- latents = self.model.inference(batch, self.scheduler, steps, guidance, samples, disable_progress = disable_progress, length = 20)
70
  mel = self.vae.decode_first_stage(latents)
71
  wave = self.vae.decode_to_waveform(mel)
72
  outputs += [item for item in wave]
 
55
  def generate(self, prompt, steps = 100, guidance = 3, samples = 1, disable_progress = True):
56
  # Generate audio for a single prompt string
57
  with torch.no_grad():
58
+ latents = self.model.inference([prompt], self.scheduler, steps, guidance, samples, disable_progress = disable_progress, length = 20)
59
  mel = self.vae.decode_first_stage(latents)
60
  wave = self.vae.decode_to_waveform(mel)
61
  return wave
 
66
  for k in tqdm(range(0, len(prompts), batch_size)):
67
  batch = prompts[k: k + batch_size]
68
  with torch.no_grad():
69
+ latents = self.model.inference(batch, self.scheduler, steps, guidance, samples, disable_progress = disable_progress)
70
  mel = self.vae.decode_first_stage(latents)
71
  wave = self.vae.decode_to_waveform(mel)
72
  outputs += [item for item in wave]