fffiloni committed
Commit
5eca949
1 Parent(s): 9acf377

Update app.py

Files changed (1)
app.py  +43 -23
app.py CHANGED
@@ -64,48 +64,45 @@ def check_for_wav_in_outputs():
     else:
         return None

-def infer(text):
+def infer(prompt_in, chords, duration, bpms):

     # check if 'outputs' dir exists and empty it if necessary
     check_outputs_folder('./example_1')

     # set hparams
     output_dir = 'example_1' ### change this output directory
-
-
-    duration = 30
+
+    duration = duration
     num_samples = 1
     bs = 1
-
-
+
     # load your model
     musicgen = audiocraft.models.MusicGen.get_pretrained('./ckpt/musicongen') ### change this path
     musicgen.set_generation_params(duration=duration, extend_stride=duration//2, top_k = 250)


-    chords = ['C G A:min F']
+    chords = [chords] * num_samples

-    descriptions = ["A laid-back blues shuffle with a relaxed tempo, warm guitar tones, and a comfortable groove, perfect for a slow dance or a night in. Instruments: electric guitar, bass, drums."] * num_samples
+    descriptions = [prompt_in] * num_samples

-    bpms = [120] * num_samples
+    bpms = [bpms] * num_samples

     meters = [4] * num_samples

     wav = []
     for i in range(num_samples//bs):
-        print(f"starting {i} batch...")
-        temp = musicgen.generate_with_chords_and_beats(descriptions[i*bs:(i+1)*bs],
-                                                       chords[i*bs:(i+1)*bs],
-                                                       bpms[i*bs:(i+1)*bs],
-                                                       meters[i*bs:(i+1)*bs]
-                                                       )
-        wav.extend(temp.cpu())
+        print(f"starting {i} batch...")
+        temp = musicgen.generate_with_chords_and_beats(descriptions[i*bs:(i+1)*bs],
+                                                       chords[i*bs:(i+1)*bs],
+                                                       bpms[i*bs:(i+1)*bs],
+                                                       meters[i*bs:(i+1)*bs]
+                                                       )
+        wav.extend(temp.cpu())

     # save and display generated audio
     for idx, one_wav in enumerate(wav):
-
-        sav_path = os.path.join('./output_samples', output_dir, chords[idx] + "|" + descriptions[idx]).replace(" ", "_")
-        audio_write(sav_path, one_wav.cpu(), musicgen.sample_rate, strategy='loudness', loudness_compressor=True)
+        sav_path = os.path.join('./output_samples', output_dir, chords[idx] + "|" + descriptions[idx]).replace(" ", "_")
+        audio_write(sav_path, one_wav.cpu(), musicgen.sample_rate, strategy='loudness', loudness_compressor=True)

     # Print the outputs directory contents
     print_directory_contents('./output_samples')
@@ -113,17 +110,40 @@ def infer(text):
     print(wav_file_path)
     return wav_file_path

-with gr.Blocks() as demo:
-    with gr.Column():
+css="""
+#col-container{
+    max-width: 800px;
+    margin: 0 auto;
+}
+"""
+
+with gr.Blocks(css=css) as demo:
+    with gr.Column(elem_id="col-container"):
         gr.Markdown("#MusiConGen")
         with gr.Row():
             with gr.Column():
-                text_in = gr.Textbox()
+                prompt_in = gr.Textbox(label="Music description", value="A laid-back blues shuffle with a relaxed tempo, warm guitar tones, and a comfortable groove, perfect for a slow dance or a night in. Instruments: electric guitar, bass, drums.")
+                chords = gr.Textbox(label="Chords progression", value='C G A:min F')
+                duration = gr.Slider(label="Sample duration", minimum=4, maximum=30, step=1, value=20)
+                bpms = gr.Slider(label="BPMs", minimum=50, maximum=220, step=1, value=120)
                 submit_btn = gr.Button("Submit")
                 wav_out = gr.Audio(label="Wav Result")
+                gr.Examples(
+                    label = "Audio description examples",
+                    examples = [
+                        ["A laid-back blues shuffle with a relaxed tempo, warm guitar tones, and a comfortable groove, perfect for a slow dance or a night in. Instruments: electric guitar, bass, drums."],
+                        ["A smooth acid jazz track with a laid-back groove, silky electric piano, and a cool bass, providing a modern take on jazz. Instruments: electric piano, bass, drums."],
+                        ["A classic rock n' roll tune with catchy guitar riffs, driving drums, and a pulsating bass line, reminiscent of the golden era of rock. Instruments: electric guitar, bass, drums."],
+                        ["A high-energy funk tune with slap bass, rhythmic guitar riffs, and a tight horn section, guaranteed to get you grooving. Instruments: bass, guitar, trumpet, saxophone, drums."],
+                        ["A heavy metal onslaught with double kick drum madness, aggressive guitar riffs, and an unrelenting bass, embodying the spirit of metal. Instruments: electric guitar, bass guitar, drums."]
+                    ],
+                    inputs = [prompt_in]
+                )
+
+
     submit_btn.click(
         fn = infer,
-        inputs = [text_in],
+        inputs = [prompt_in, chords, duration, bpms],
         outputs = [wav_out]
     )
 demo.launch()
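
For quick reference, below is a minimal usage sketch, not part of the commit, showing a hypothetical direct call to the updated infer() with the same values the new widgets default to. It assumes app.py's environment: the ./ckpt/musicongen checkpoint, the MusiConGen audiocraft fork, and infer() defined in the current module.

# Hypothetical usage sketch (not in the commit): call infer() with the widget defaults.
# Assumes app.py's imports, the ./ckpt/musicongen checkpoint, and the MusiConGen
# audiocraft fork are available, and that infer() is defined in the current module.
wav_path = infer(
    prompt_in="A laid-back blues shuffle with a relaxed tempo, warm guitar tones, and a comfortable groove, perfect for a slow dance or a night in. Instruments: electric guitar, bass, drums.",
    chords="C G A:min F",  # same format as the "Chords progression" textbox
    duration=20,           # seconds, the "Sample duration" slider default
    bpms=120,              # the "BPMs" slider default
)
print(wav_path)  # path of the generated .wav written under ./output_samples/example_1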