3v324v23 committed on
Commit
0550b50
1 Parent(s): b4fd09f
Files changed (3)
  1. BeatManipulator.py +525 -0
  2. app.py +9 -0
  3. requirements.txt +3 -0
BeatManipulator.py ADDED
@@ -0,0 +1,525 @@
+ import numpy
+ def open_audio(filename=None, lib='auto'):
+     if filename is None:
+         from tkinter.filedialog import askopenfilename
+         filename = askopenfilename(title='select song', filetypes=[("mp3", ".mp3"), ("wav", ".wav"), ("flac", ".flac"), ("ogg", ".ogg"), ("wma", ".wma")])
+     filename = filename.replace('\\', '/')
+     if lib == 'pedalboard.io':
+         from pedalboard.io import AudioFile
+         with AudioFile(filename) as f:
+             audio = f.read(f.frames)
+             samplerate = f.samplerate
+     elif lib == 'librosa':
+         import librosa
+         audio, samplerate = librosa.load(filename, sr=None, mono=False)
+     elif lib == 'soundfile':
+         import soundfile
+         audio, samplerate = soundfile.read(filename)
+         audio = audio.T
+     elif lib == 'madmom':
+         import madmom
+         audio, samplerate = madmom.io.audio.load_audio_file(filename, dtype=float)
+         audio = audio.T
+     # elif lib=='pydub':
+     #     from pydub import AudioSegment
+     #     song=AudioSegment.from_file(filename)
+     #     audio = song.get_array_of_samples()
+     #     samplerate=song.frame_rate
+     #     print(audio)
+     #     print(filename)
+     elif lib == 'auto':
+         # try the available backends in order until one succeeds
+         for i in ('madmom', 'soundfile', 'librosa', 'pedalboard.io'):
+             try:
+                 audio, samplerate = open_audio(filename, i)
+                 break
+             except Exception as e:
+                 print(e)
+     # duplicate mono audio so the rest of the code can assume two channels
+     audio = numpy.asarray(audio)
+     if audio.ndim == 1: audio = numpy.asarray([audio, audio])
+     return audio, samplerate
+
+
+ def generate_sidechain(samplerate=44100, len=0.5, curve=2, vol0=0, vol1=1, smoothing=40):
+     # short fade-out followed by a volume ramp, used as a sidechain envelope
+     x = numpy.concatenate((numpy.linspace(1, 0, smoothing), numpy.linspace(vol0, vol1, int(len*samplerate))**curve))
+     return (x, x)
+
+ def outputfilename(output, filename, suffix='_beatswap'):
+     return output + ''.join(''.join(filename.split('/')[-1]).split('.')[:-1]) + suffix + '.mp3'
+
+ def generate_sine(len, freq, samplerate, volume=1):
+     return numpy.sin(numpy.linspace(0, freq*2*numpy.pi*len, int(len*samplerate)))*volume
+
+ def generate_saw(len, freq, samplerate, volume=1):
+     return (numpy.linspace(0, freq*2*len, int(len*samplerate)) % 2 - 1)*volume
+
+ def generate_square(len, freq, samplerate, volume=1):
+     return ((numpy.linspace(0, freq*2*len, int(len*samplerate))) // 1 % 2 * 2 - 1)*volume
+ class song:
+     def __init__(self, filename=None, audio=None, samplerate=None, beatmap=None):
+         if filename is None:
+             from tkinter.filedialog import askopenfilename
+             self.filename = askopenfilename(title='select song', filetypes=[("mp3", ".mp3"), ("wav", ".wav"), ("flac", ".flac"), ("ogg", ".ogg"), ("wma", ".wma")])
+             self.audio, self.samplerate = open_audio(self.filename)
+         else:
+             self.filename = filename
+             if audio is None or samplerate is None:
+                 self.audio, self.samplerate = open_audio(self.filename)
+             else:
+                 # keep the audio/samplerate that were passed in
+                 self.audio, self.samplerate = audio, samplerate
+         self.beatmap = beatmap
+         self.filename = self.filename.replace('\\', '/')
+         self.samplerate = int(self.samplerate)
+
+     def write_audio(self, output: str, lib='auto'):
+         if lib == 'pedalboard.io':
+             if not isinstance(self.audio, numpy.ndarray): self.audio = numpy.asarray(self.audio)
+             #print(audio)
+             from pedalboard.io import AudioFile
+             with AudioFile(output, 'w', self.samplerate, self.audio.shape[0]) as f:
+                 f.write(self.audio)
+         elif lib == 'soundfile':
+             if not isinstance(self.audio, numpy.ndarray): self.audio = numpy.asarray(self.audio)
+             audio = self.audio.T
+             import soundfile
+             soundfile.write(output, audio, self.samplerate)
+             del audio
+         elif lib == 'auto':
+             for i in ('soundfile', 'pedalboard.io'):
+                 try:
+                     song.write_audio(self, output, i)
+                     break
+                 except Exception as e:
+                     print(e)
+
+         # elif lib=='pydub':
+         #     from pydub import AudioSegment
+         #     song = AudioSegment(self.audio.tobytes(), frame_rate=self.samplerate, sample_width=2, channels=2)
+         #     format = output.split('.')[-1]
+         #     if len(format) > 4:
+         #         format='mp3'
+         #     output = output + '.' + format
+         #     song.export(output, format=format)
+
+     def beatmap_scale(self, scale: float):
+         import math
+         if scale != 1:
+             a = 0
+             b = numpy.array([])
+             while a < len(self.beatmap[:-math.ceil(scale)]):
+                 b = numpy.append(b, (1-(a % 1))*self.beatmap[math.floor(a)] + (a % 1)*self.beatmap[math.ceil(a)])
+                 a += scale
+             self.beatmap = b
+
+     def analyze_beats(self, lib='madmom.BeatDetectionProcessor', caching=True, split=None):
+         #if audio is None and filename is None: (audio, samplerate) = open_audio()
+         if caching is True:
+             import hashlib
+             with open(self.filename, "rb") as f:
+                 file_hash = hashlib.blake2b()
+                 while chunk := f.read(8192):
+                     file_hash.update(chunk)
+             import os
+             if not os.path.exists('SavedBeatmaps'):
+                 os.mkdir('SavedBeatmaps')
+             cacheDir = "SavedBeatmaps/" + ''.join(self.filename.split('/')[-1]) + lib + "_" + file_hash.hexdigest()[:5] + '.txt'
+             try:
+                 self.beatmap = numpy.loadtxt(cacheDir, dtype=int)
+                 return self.beatmap
+             except OSError: pass
+
+         if lib.split('.')[0] == 'madmom':
+             from collections.abc import MutableMapping, MutableSequence
+             import madmom
+         if lib == 'madmom.BeatTrackingProcessor':
+             proc = madmom.features.beats.BeatTrackingProcessor(fps=100)
+             act = madmom.features.beats.RNNBeatProcessor()(madmom.audio.signal.Signal(self.audio.T, self.samplerate))
+             self.beatmap = proc(act)*self.samplerate
+         if lib == 'madmom.BeatTrackingProcessor.constant':
+             proc = madmom.features.beats.BeatTrackingProcessor(fps=100, look_ahead=None)
+             act = madmom.features.beats.RNNBeatProcessor()(madmom.audio.signal.Signal(self.audio.T, self.samplerate))
+             self.beatmap = proc(act)*self.samplerate
+         if lib == 'madmom.BeatTrackingProcessor.consistent':
+             proc = madmom.features.beats.BeatTrackingProcessor(fps=100, look_ahead=None, look_aside=0)
+             act = madmom.features.beats.RNNBeatProcessor()(madmom.audio.signal.Signal(self.audio.T, self.samplerate))
+             self.beatmap = proc(act)*self.samplerate
+         elif lib == 'madmom.BeatDetectionProcessor':
+             proc = madmom.features.beats.BeatDetectionProcessor(fps=100)
+             act = madmom.features.beats.RNNBeatProcessor()(madmom.audio.signal.Signal(self.audio.T, self.samplerate))
+             self.beatmap = proc(act)*self.samplerate
+         elif lib == 'madmom.BeatDetectionProcessor.consistent':
+             proc = madmom.features.beats.BeatDetectionProcessor(fps=100, look_aside=0)
+             act = madmom.features.beats.RNNBeatProcessor()(madmom.audio.signal.Signal(self.audio.T, self.samplerate))
+             self.beatmap = proc(act)*self.samplerate
+         elif lib == 'madmom.CRFBeatDetectionProcessor':
+             proc = madmom.features.beats.CRFBeatDetectionProcessor(fps=100)
+             act = madmom.features.beats.RNNBeatProcessor()(madmom.audio.signal.Signal(self.audio.T, self.samplerate))
+             self.beatmap = proc(act)*self.samplerate
+         elif lib == 'madmom.CRFBeatDetectionProcessor.constant':
+             proc = madmom.features.beats.CRFBeatDetectionProcessor(fps=100, use_factors=True, factors=[0.5, 1, 2])
+             act = madmom.features.beats.RNNBeatProcessor()(madmom.audio.signal.Signal(self.audio.T, self.samplerate))
+             self.beatmap = proc(act)*self.samplerate
+         elif lib == 'madmom.DBNBeatTrackingProcessor':
+             proc = madmom.features.beats.DBNBeatTrackingProcessor(fps=100)
+             act = madmom.features.beats.RNNBeatProcessor()(madmom.audio.signal.Signal(self.audio.T, self.samplerate))
+             self.beatmap = proc(act)*self.samplerate
+         elif lib == 'madmom.DBNBeatTrackingProcessor.1000':
+             proc = madmom.features.beats.DBNBeatTrackingProcessor(fps=100, transition_lambda=1000)
+             act = madmom.features.beats.RNNBeatProcessor()(madmom.audio.signal.Signal(self.audio.T, self.samplerate))
+             self.beatmap = proc(act)*self.samplerate
+         elif lib == 'madmom.MultiModelSelectionProcessor': #broken
+             proc = madmom.features.beats.RNNBeatProcessor(post_processor=None)
+             predictions = proc(madmom.audio.signal.Signal(self.audio.T, self.samplerate))
+             mm_proc = madmom.features.beats.MultiModelSelectionProcessor(num_ref_predictions=None)
+             self.beatmap = numpy.sort(mm_proc(predictions)*self.samplerate)
+         elif lib == 'madmom.DBNDownBeatTrackingProcessor':
+             proc = madmom.features.downbeats.DBNDownBeatTrackingProcessor(beats_per_bar=[4], fps=100)
+             act = madmom.features.downbeats.RNNDownBeatProcessor()(madmom.audio.signal.Signal(self.audio.T, self.samplerate))
+             self.beatmap = proc(act)*self.samplerate
+             self.beatmap = self.beatmap[:, 0]
+         elif lib == 'madmom.PatternTrackingProcessor': #broken
+             from madmom.models import PATTERNS_BALLROOM
+             proc = madmom.features.downbeats.PatternTrackingProcessor(PATTERNS_BALLROOM, fps=50)
+             from madmom.audio.spectrogram import LogarithmicSpectrogramProcessor, SpectrogramDifferenceProcessor, MultiBandSpectrogramProcessor
+             from madmom.processors import SequentialProcessor
+             log = LogarithmicSpectrogramProcessor()
+             diff = SpectrogramDifferenceProcessor(positive_diffs=True)
+             mb = MultiBandSpectrogramProcessor(crossover_frequencies=[270])
+             pre_proc = SequentialProcessor([log, diff, mb])
+             act = pre_proc(madmom.audio.signal.Signal(self.audio.T, self.samplerate))
+             self.beatmap = proc(act)*self.samplerate
+             self.beatmap = self.beatmap[:, 0]
+         elif lib == 'madmom.DBNBarTrackingProcessor': #broken
+             beats = song.analyze_beats(self, lib='madmom.DBNBeatTrackingProcessor', caching=caching)
+             proc = madmom.features.downbeats.DBNBarTrackingProcessor(beats_per_bar=[4], fps=100)
+             act = madmom.features.downbeats.RNNBarProcessor()(((madmom.audio.signal.Signal(self.audio.T, self.samplerate)), beats))
+             self.beatmap = proc(act)*self.samplerate
+         elif lib == 'librosa': #broken in 3.9, works in 3.8
+             import librosa
+             beat_frames = librosa.beat.beat_track(y=self.audio[0], sr=self.samplerate, hop_length=512)
+             self.beatmap = librosa.frames_to_samples(beat_frames[1])
+         # elif lib=='BeatNet':
+         #     from BeatNet.BeatNet import BeatNet # doesn't seem to work well for some reason
+         #     estimator = BeatNet(1, mode='offline', inference_model='DBN', plot=[], thread=False)
+         #     beatmap = estimator.process(filename)
+         #     beatmap = beatmap[:,0]*samplerate
+         # elif lib=='jump-reward-inference': # doesn't seem to work well for some reason
+         #     from jump_reward_inference.joint_tracker import joint_inference
+         #     estimator = joint_inference(1, plot=False)
+         #     beatmap = estimator.process(filename)
+         #     beatmap = beatmap[:,0]*samplerate
+
+         elif lib == 'split':
+             # split the track into `split` equal parts (use the channel length, not the channel count)
+             self.beatmap = numpy.arange(0, len(self.audio[0]), len(self.audio[0])//split)
+         if lib.split('.')[0] == 'madmom':
+             # shift madmom's beat positions 500 samples earlier
+             self.beatmap = numpy.absolute(self.beatmap - 500)
+
+         if caching is True: numpy.savetxt(cacheDir, self.beatmap.astype(int))
+
+     def audio_autotrim(self):
+         # trim leading silence (samples below the threshold) from both channels
+         n = 0
+         for i in self.audio[0]:
+             if abs(i) >= 0.0001: break
+             n += 1
+         self.audio = numpy.asarray([self.audio[0, n:], self.audio[1, n:]])
+         #print(beatmap)
+         if self.beatmap is not None:
+             self.beatmap = numpy.absolute(self.beatmap - n)
+         else:
+             print('It is recommended to only use autotrim after computing the beatmap')
+
+     def beatmap_autoscale(self):
+         # average number of samples per detected beat
+         bpm = (self.beatmap[-1] - self.beatmap[0]) / (len(self.beatmap) - 1)
+         #print('BPM =', (bpm/samplerate) * 240, bpm)
+         if bpm >= 160000: scale = 1/8
+         elif bpm >= 80000: scale = 1/4
+         elif bpm >= 40000: scale = 1/2
+         elif bpm <= 5000: scale = 8
+         elif bpm <= 10000: scale = 4
+         elif bpm <= 20000: scale = 2
+         else: scale = 1
+         song.beatmap_scale(self, scale)
+
+     def beatmap_autoinsert(self):
+         diff = (self.beatmap[1] - self.beatmap[0])
+         while diff < self.beatmap[0]:
+             self.beatmap = numpy.insert(self.beatmap, 0, self.beatmap[0] - diff)
+
+     def beatmap_shift(self, shift: float):
+         if shift > 0:
+             for i in range(len(self.beatmap)-1):
+                 self.beatmap[i] = self.beatmap[i] + shift * (self.beatmap[i+1] - self.beatmap[i])
+         elif shift < 0:
+             for i in reversed(range(len(self.beatmap)-1)):
+                 self.beatmap[i+1] = self.beatmap[i+1] - shift * (self.beatmap[i] - self.beatmap[i+1])
+
+     def beatmap_trim(self, start=0, end=None):
+         # start and end are given in seconds; the beatmap is in samples
+         start *= self.samplerate
+         self.beatmap = self.beatmap[self.beatmap >= start].astype(int)
+         if end is not None: self.beatmap = self.beatmap[self.beatmap <= end*self.samplerate].astype(int)
+
+
+     def beatswap(self, pattern: str, sep=',', smoothing=40, smoothing_mode='replace'):
+         import math, numpy
+         # get pattern size
+         size = 0
+         #cut processing??? not worth it, it is really fast anyways
+         pattern = pattern.replace(' ', '').split(sep)
+         for j in pattern:
+             s = ''
+             if '?' not in j:
+                 for i in j:
+                     if i.isdigit() or i=='.' or i=='-' or i=='/' or i=='+' or i=='%': s = str(s)+str(i)
+                     elif i==':':
+                         if s=='': s='0'
+                         size = max(math.ceil(float(eval(s))), size)
+                         s = ''
+                     elif s!='': break
+                 if s=='': s='0'
+             if s=='': s='0'
+             size = max(size, eval(s))
+
+         if isinstance(self.audio, numpy.ndarray): self.audio = numpy.ndarray.tolist(self.audio)
+         if self.beatmap.dtype != 'int32': self.beatmap = self.beatmap.astype(int)
+
+         #beat=[]
+         #start=audio[:beatmap[0]]
+         #end=audio[beatmap[-1]:audio[-1]]
+         #for i in range(len(beatmap)-1):
+         #    beat[i]=audio[beatmap[i]:beatmap[i+1]]
+
+         # audio is a tuple with l and r channels
+         #print(len(audio))
+
+         self.audio = (self.audio[0], self.audio[1])
+         #print(beatmap[0], audio[0][100])
+         result = (self.audio[0][:self.beatmap[0]], self.audio[1][:self.beatmap[0]])
+         beat = numpy.asarray([[],[]])
+
+         # size, iterations are integers
+         size = int(max(size//1, 1))
+         iterations = int(len(self.beatmap)//size)
+
+         # add beat to the end
+         self.beatmap = numpy.append(self.beatmap, len(self.audio[0]))
+
+         def beatswap_getnum(i: str, c: str):
+             if c in i:
+                 try:
+                     x = i.index(c)+1
+                     z = ''
+                     try:
+                         while i[x].isdigit() or i[x]=='.' or i[x]=='-' or i[x]=='/' or i[x]=='+' or i[x]=='%':
+                             z += i[x]
+                             x += 1
+                         return z
+                     except IndexError:
+                         return z
+                 except ValueError: return None
+
+         #print(size, iterations)
+         # processing
+         for j in range(iterations):
+             for i in pattern:
+                 if '!' not in i:
+                     n, s, st, reverse, z = 0, '', None, False, None
+                     for c in i:
+                         n += 1
+                         #print('c =', s, ', st =', st, ', s =', s, ', n =,',n)
+
+                         # Get the character
+                         if c.isdigit() or c=='.' or c=='-' or c=='/' or c=='+' or c=='%':
+                             s = str(s)+str(c)
+
+                         # If character is : - get start
+                         elif s!='' and c==':':
+                             #print ('Beat start:',s,'=', eval(s),'=',int(eval(s)//1), '+',j,'*',size,' =',int(eval(s)//1)+j*size, ', mod=',eval(s)%1)
+                             try: st = self.beatmap[int(eval(s)//1)+j*size] + eval(s)%1 * (self.beatmap[int(eval(s)//1)+j*size+1] - self.beatmap[int(eval(s)//1)+j*size])
+                             except IndexError: break
+                             s = ''
+
+                         # create a beat
+                         if s!='' and (n==len(i) or not(c.isdigit() or c=='.' or c=='-' or c=='/' or c=='+' or c=='%')):
+
+                             # start already exists
+                             if st is not None:
+                                 #print ('Beat end: ',s,'=', eval(s),'=',int(eval(s)//1), '+',j,'*',size,' =',int(eval(s)//1)+j*size, ', mod=',eval(s)%1)
+                                 try:
+                                     s = self.beatmap[int(eval(s)//1)+j*size] + eval(s)%1 * (self.beatmap[int(eval(s)//1)+j*size+1] - self.beatmap[int(eval(s)//1)+j*size])
+                                     #print(s)
+                                 except IndexError: break
+                             else:
+                                 # start doesn't exist
+                                 #print ('Beat start:',s,'=', eval(s),'=',int(eval(s)//1), '+',j,'*',size,'- 1 =',int(eval(s)//1)+j*size, ', mod=',eval(s)%1)
+                                 #print ('Beat end: ',s,'=', eval(s),'=',int(eval(s)//1), '+',j,'*',size,' =',int(eval(s)//1)+j*size+1, ', mod=',eval(s)%1)
+                                 try:
+                                     st = self.beatmap[int(eval(s)//1)+j*size-1] + eval(s)%1 * (self.beatmap[int(eval(s)//1)+j*size+1] - self.beatmap[int(eval(s)//1)+j*size])
+                                     s = self.beatmap[int(eval(s)//1)+j*size] + eval(s)%1 * (self.beatmap[int(eval(s)//1)+j*size+1] - self.beatmap[int(eval(s)//1)+j*size])
+                                 except IndexError: break
+
+                             if st > s:
+                                 s, st = st, s
+                                 reverse = True
+
+                             # create the beat
+                             if len(self.audio) > 1:
+                                 if smoothing_mode=='add': beat = numpy.asarray([self.audio[0][int(st):int(s)], self.audio[1][int(st):int(s)]])
+                                 else: beat = numpy.asarray([self.audio[0][int(st):int(s)-smoothing], self.audio[1][int(st):int(s)-smoothing]])
+                             else:
+                                 if smoothing_mode=='add': beat = numpy.asarray([self.audio[0][int(st):int(s)]])
+                                 else: beat = numpy.asarray([self.audio[0][int(st):int(s)-smoothing]])
+
+                             # process the beat
+                             # channels
+                             z = beatswap_getnum(i, 'c')
+                             if z is not None:
+                                 if z=='': beat[0], beat[1] = beat[1], beat[0]
+                                 elif eval(z)==0: beat[0] *= 0
+                                 else: beat[1] *= 0
+
+                             # volume
+                             z = beatswap_getnum(i, 'v')
+                             if z is not None:
+                                 if z=='': z='0'
+                                 beat *= eval(z)
+
+                             z = beatswap_getnum(i, 't')
+                             if z is not None:
+                                 if z=='': z='2'
+                                 beat **= 1/eval(z)
+
+                             # speed
+                             z = beatswap_getnum(i, 's')
+                             if z is not None:
+                                 if z=='': z='2'
+                                 z = eval(z)
+                                 if z < 1:
+                                     beat = numpy.asarray((numpy.repeat(beat[0], int(1//z)), numpy.repeat(beat[1], int(1//z))))
+                                 else:
+                                     beat = numpy.asarray((beat[0, ::int(z)], beat[1, ::int(z)]))
+
+                             # bitcrush
+                             z = beatswap_getnum(i, 'b')
+                             if z is not None:
+                                 if z=='': z='3'
+                                 z = 1/eval(z)
+                                 if z < 1: beat = beat*z
+                                 beat = numpy.around(beat, max(int(z), 1))
+                                 if z < 1: beat = beat/z
+
+                             # downsample
+                             z = beatswap_getnum(i, 'd')
+                             if z is not None:
+                                 if z=='': z='3'
+                                 z = int(eval(z))
+                                 beat = numpy.asarray((numpy.repeat(beat[0, ::z], z), numpy.repeat(beat[1, ::z], z)))
+
+                             # convert to list
+                             beat = beat.tolist()
+
+                             # effects with list
+                             # reverse
+                             if ('r' in i and reverse is False) or (reverse is True and 'r' not in i):
+                                 beat = (beat[0][::-1], beat[1][::-1])
+
+                             # add beat to the result
+                             for a in range(len(self.audio)):
+                                 #print('Adding beat... a, s, st:', a, s, st, sep=', ')
+                                 #print(result[a][-1])
+                                 #print(beat[a][0])
+                                 if smoothing > 0: result[a].extend(numpy.linspace(result[a][-1], beat[a][0], smoothing))
+                                 result[a].extend(beat[a])
+                             #print(len(result[0]))
+
+                             # one beat per pattern token, so stop scanning this token
+                             break
+         #print(time.process_time() - benchmark)
+
+         self.audio = result
+
+     def beatsample(self, audio2, shift=0):
+         try: l = len(audio2[0])
+         except (TypeError, IndexError):
+             l = len(audio2)
+             audio2 = numpy.vstack((audio2, audio2))
+         for i in range(len(self.beatmap)):
+             try: self.audio[:, int(self.beatmap[i]) + int(float(shift) * (int(self.beatmap[i+1])-int(self.beatmap[i]))) : int(self.beatmap[i]) + int(float(shift) * (int(self.beatmap[i+1])-int(self.beatmap[i]))) + int(l)] += audio2
+             except (IndexError, ValueError): pass
+
+     def sidechain(self, audio2, shift=0, smoothing=40):
+         try: l = len(audio2[0])
+         except (TypeError, IndexError):
+             l = len(audio2)
+             audio2 = numpy.vstack((audio2, audio2))
+         for i in range(len(self.beatmap)):
+             try: self.audio[:, int(self.beatmap[i])-smoothing + int(float(shift) * (int(self.beatmap[i+1])-int(self.beatmap[i]))) : int(self.beatmap[i])-smoothing + int(float(shift) * (int(self.beatmap[i+1])-int(self.beatmap[i]))) + int(l)] *= audio2
+             except (IndexError, ValueError): break
+
+     def quick_beatswap(self, output='', pattern=None, scale=1, shift=0, start=0, end=None, autotrim=True, autoscale=False, autoinsert=False, suffix='_BeatSwap', lib='madmom.BeatDetectionProcessor'):
+
+         if self.beatmap is None: song.analyze_beats(self, lib=lib)
+         if autotrim is True: song.audio_autotrim(self)
+         save = numpy.copy(self.beatmap)
+         if autoscale is True: song.beatmap_autoscale(self)
+         if shift != 0: song.beatmap_shift(self, shift)
+         if scale != 1: song.beatmap_scale(self, scale)
+         if autoinsert is True: song.beatmap_autoinsert(self)
+         if start != 0 or end is not None: song.beatmap_trim(self, start, end)
+         song.beatswap(self, pattern)
+
+         if output is not None:
+             if not (output.lower().endswith('.mp3') or output.lower().endswith('.wav') or output.lower().endswith('.flac') or output.lower().endswith('.ogg') or
+             output.lower().endswith('.aac') or output.lower().endswith('.ac3') or output.lower().endswith('.aiff') or output.lower().endswith('.wma')):
+                 output = output + ''.join(''.join(self.filename.split('/')[-1]).split('.')[:-1]) + suffix + '.mp3'
+             song.write_audio(self, output)
+
+         self.beatmap = save
+
+
+     def quick_sidechain(self, output='', audio2=None, scale=1, shift=0, start=0, end=None, autotrim=True, autoscale=False, autoinsert=False, filename2=None, suffix='_Sidechain', lib='madmom.BeatDetectionProcessor'):
+
+         if filename2 is None and audio2 is None:
+             audio2 = generate_sidechain()
+
+         if audio2 is None:
+             audio2, samplerate2 = open_audio(filename2)
+
+         if self.beatmap is None: song.analyze_beats(self, lib=lib)
+         if autotrim is True: song.audio_autotrim(self)
+         save = numpy.copy(self.beatmap)
+         if autoscale is True: song.beatmap_autoscale(self)
+         if shift != 0: song.beatmap_shift(self, shift)
+         if scale != 1: song.beatmap_scale(self, scale)
+         if autoinsert is True: song.beatmap_autoinsert(self)
+         if start != 0 or end is not None: song.beatmap_trim(self, start, end)
+         song.sidechain(self, audio2)
+
+         if output is not None:
+             if not (output.lower().endswith('.mp3') or output.lower().endswith('.wav') or output.lower().endswith('.flac') or output.lower().endswith('.ogg') or
+             output.lower().endswith('.aac') or output.lower().endswith('.ac3') or output.lower().endswith('.aiff') or output.lower().endswith('.wma')):
+                 output = output + ''.join(''.join(self.filename.split('/')[-1]).split('.')[:-1]) + suffix + '.mp3'
+             song.write_audio(self, output)
+
+         self.beatmap = save
+
+     def quick_beatsample(self, output='', filename2=None, scale=1, shift=0, start=0, end=None, autotrim=True, autoscale=False, autoinsert=False, audio2=None, suffix='_BeatSample', lib='madmom.BeatDetectionProcessor'):
+         if filename2 is None and audio2 is None:
+             from tkinter.filedialog import askopenfilename
+             filename2 = askopenfilename(title='select sidechain impulse', filetypes=[("mp3", ".mp3"), ("wav", ".wav"), ("flac", ".flac"), ("ogg", ".ogg"), ("wma", ".wma")])
+
+         if audio2 is None:
+             audio2, samplerate2 = open_audio(filename2)
+
+         if self.beatmap is None: song.analyze_beats(self, lib=lib)
+         if autotrim is True: song.audio_autotrim(self)
+         save = numpy.copy(self.beatmap)
+         if autoscale is True: song.beatmap_autoscale(self)
+         if shift != 0: song.beatmap_shift(self, shift)
+         if scale != 1: song.beatmap_scale(self, scale)
+         if autoinsert is True: song.beatmap_autoinsert(self)
+         if start != 0 or end is not None: song.beatmap_trim(self, start, end)
+         song.beatsample(self, audio2)
+
+         if output is not None:
+             if not (output.lower().endswith('.mp3') or output.lower().endswith('.wav') or output.lower().endswith('.flac') or output.lower().endswith('.ogg') or
+             output.lower().endswith('.aac') or output.lower().endswith('.ac3') or output.lower().endswith('.aiff') or output.lower().endswith('.wma')):
+                 output = output + ''.join(''.join(self.filename.split('/')[-1]).split('.')[:-1]) + suffix + '.mp3'
+             song.write_audio(self, output)
+         self.beatmap = save
+
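A minimal usage sketch of the module above. The file names example.mp3 and swapped.wav are hypothetical; it assumes madmom and soundfile from requirements.txt are installed and that one of the loading backends can decode the input file:

import BeatManipulator as bm
track = bm.song('example.mp3')                          # load audio and samplerate
# detect beats, play them in 1-3-2-4 order within every group of four, write the result
track.quick_beatswap(output='swapped.wav', pattern='1, 3, 2, 4')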
app.py ADDED
@@ -0,0 +1,9 @@
+ import gradio as gr
+ import BeatManipulator as bm
+ def BeatSwap(pattern: str):
+     audio = bm.song()
+     audio.quick_beatswap(output=None, pattern=pattern)
+     return audio.audio
+
+ ui = gr.Interface(fn=BeatSwap, inputs="audio", outputs="audio")
+ ui.launch()
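As committed, the interface feeds the uploaded audio into the pattern argument and bm.song() falls back to a local file dialog, so it will not run on a hosted Space. A hedged sketch of one possible wiring (the function name beatswap and the temp file gradio_input.wav are hypothetical; assumes gradio's numpy audio format of (samplerate, array) and that soundfile is installed):

import gradio as gr
import numpy
import soundfile
import BeatManipulator as bm

def beatswap(audio_input, pattern):
    samplerate, data = audio_input                        # gradio passes (samplerate, samples array)
    soundfile.write('gradio_input.wav', data, samplerate) # song() loads from a file path
    track = bm.song('gradio_input.wav')
    track.quick_beatswap(output=None, pattern=pattern)
    return track.samplerate, numpy.asarray(track.audio).T # back to gradio's expected layout

ui = gr.Interface(fn=beatswap, inputs=['audio', 'text'], outputs='audio')
ui.launch()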
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ numpy
+ madmom
+ soundfile