asigalov61 committed
Commit 6c4cb55
Parent(s): fd4c083
Update app.py
app.py CHANGED
@@ -66,86 +66,156 @@ def ClassifyMIDI(input_midi):
 66
 67         print('Done!')
 68         print('=' * 70)
 69 -       seed_melody = seed_melodies_data[input_melody_seed_number]
 70 -       print('Input melody seed number:', input_melody_seed_number)
 71 -       print('-' * 70)
 72
 73         #==================================================================
 74
 75         print('=' * 70)
 76 -
 77 -       print('Sample output events', seed_melody[:16])
 78 -       print('=' * 70)
 79 -       print('Generating...')
 80
 81 -
 82
 83 -       with ctx:
 84 -           out = model.generate(x,
 85 -                                1536,
 86 -                                temperature=0.9,
 87 -                                return_prime=False,
 88 -                                verbose=False)
 89 -
 90 -       output = out[0].tolist()
 91 -
 92         print('=' * 70)
 93         print('Done!')
 94         print('=' * 70)
 95
 96 -
 97 -       print('Rendering results...')
 98
 99         print('=' * 70)
100 -       print('
101         print('=' * 70)
102
103 -
104 -
105 -
106
107 -
108 -       song_f = []
109
110 -
111 -       dur = 0
112 -       vel = 90
113 -       pitch = 0
114 -       channel = 0
115
116 -
117 -       patches[3] = 40
118
119 -
120
121 -
122
123 -
124
125 -
126
127 -
128
129 -
130
131 -
132
133 -
134
135 -
136 -
137 -
138 -
139 -
140 -
141 -
142 -
143 -
144
145         fn1 = "Melody2Song-Seq2Seq-Music-Transformer-Composition"
146
147         detailed_stats = TMIDIX.Tegridy_ms_SONG_to_MIDI_Converter(song_f,
148 -                                                                 output_signature = '
149                                                                   output_file_name = fn1,
150                                                                   track_name='Project Los Angeles',
151                                                                   list_of_MIDI_patches=patches
 66
 67         print('Done!')
 68         print('=' * 70)
 69
 70         #==================================================================
 71
 72         print('=' * 70)
 73
 74 +       fn = os.path.basename(input_midi.name)
 75 +       fn1 = fn.split('.')[0]
 76 +
 77 +       print('-' * 70)
 78 +       print('Input file name:', fn)
 79
 80         print('=' * 70)
 81 +       print('Loading MIDI file...')
 82 +
 83 +       midi_name = fn
 84 +
 85 +       raw_score = TMIDIX.midi2single_track_ms_score(open(input_midi.name, 'rb').read())
 86 +
 87 +       escore_notes = TMIDIX.advanced_score_processor(raw_score, return_enhanced_score_notes=True)[0]
 88 +
 89 +       escore = [e for e in TMIDIX.augment_enhanced_score_notes(escore_notes, timings_divider=32) if e[6] < 80]
 90 +
 91 +       cscore = TMIDIX.chordify_score([1000, escore])
 92 +
 93 +       #=======================================================
 94 +       # MAIN PROCESSING CYCLE
 95 +       #=======================================================
 96 +
 97 +       melody_chords = []
 98 +
 99 +       pe = cscore[0][0]
100 +
101 +       for c in cscore:
102 +
103 +           pitches = []
104 +
105 +           for e in c:
106 +
107 +               if e[4] not in pitches:
108 +
109 +                   dtime = max(0, min(127, e[1]-pe[1]))
110 +
111 +                   dur = max(1, min(127, e[2]))
112 +                   ptc = max(1, min(127, e[4]))
113 +
114 +                   melody_chords.append([dtime, dur, ptc])
115 +
116 +                   pitches.append(ptc)
117 +
118 +                   pe = e
119 +
120 +       #==============================================================
121 +
122 +       seq = []
123 +       input_data = []
124 +
125 +       notes_counter = 0
126 +
127 +       for mm in melody_chords:
128 +
129 +           time = mm[0]
130 +           dur = mm[1]
131 +           ptc = mm[2]
132 +
133 +           seq.extend([time, dur+128, ptc+256])
134 +           notes_counter += 1
135 +
136 +       for i in range(0, len(seq)-SEQ_LEN-4, (SEQ_LEN-4) // 2):
137 +           schunk = seq[i:i+SEQ_LEN-4]
138 +           input_data.append([14624] + schunk + [14625])
139 +
140         print('Done!')
141         print('=' * 70)
142
143 +       #==============================================================
144
145 +       print('Composition has', notes_counter, 'notes')
146         print('=' * 70)
147 +       print('Composition was split into', len(input_data), 'chunks', 'of 340 notes each with 170 notes overlap')
148 +       print('Number of notes in all composition chunks:', len(input_data) * 340)
149 +
150 +       number_of_batches = 100 # @param {type:"slider", min:1, max:100, step:1}
151 +
152 +       # @markdown NOTE: You can increase the number of batches on high-ram GPUs for better classification
153 +
154 +       print('=' * 70)
155 +       print('Annotated MIDI Dataset Classifier')
156         print('=' * 70)
157 +       print('Classifying...')
158
159 +       torch.cuda.empty_cache()
160 +
161 +       model.eval()
162
163 +       results = []
164
165 +       for input in input_data:
166
167 +           x = torch.tensor([input[:1022]] * number_of_batches, dtype=torch.long, device='cuda')
168
169 +           with ctx:
170 +               out = model.generate(x,
171 +                                    1,
172 +                                    temperature=0.3,
173 +                                    return_prime=False,
174 +                                    verbose=False)
175
176 +           y = out.tolist()
177
178 +           output = [l[0] for l in y]
179 +           result = statistics.mode(output)
180
181 +           results.append(result)
182
183 +       all_results_labels = [classifier_labels[0][r-384] for r in results]
184 +       final_result = statistics.mode(results)
185
186 +       print('Done!')
187 +       print('=' * 70)
188
189 +       print('Most common classification label:', classifier_labels[0][final_result-384])
190 +       print('Most common classification label ratio:', results.count(final_result) / len(results))
191 +       print('Most common classification label index', final_result)
192 +       print('=' * 70)
193
194 +       print('All classification labels summary:')
195 +       print('=' * 70)
196
197 +       for i, a in enumerate(all_results_labels):
198 +           print('Notes', i*170, '-', (i*170)+340, '===', a)
199 +
200 +       print('=' * 70)
201 +
202 +       print('=' * 70)
203 +       print('Done!')
204 +       print('=' * 70)
205 +
206 +       #===============================================================================
207 +       print('Rendering results...')
208 +
209 +       print('=' * 70)
210 +       print('Sample INTs', output[:15])
211 +       print('=' * 70)
212 +
213 +       out1 = output
214
215         fn1 = "Melody2Song-Seq2Seq-Music-Transformer-Composition"
216
217         detailed_stats = TMIDIX.Tegridy_ms_SONG_to_MIDI_Converter(song_f,
218 +                                                                 output_signature = 'Advanced MIDI Classifier',
219                                                                   output_file_name = fn1,
220                                                                   track_name='Project Los Angeles',
221                                                                   list_of_MIDI_patches=patches
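
For reference, the chunking arithmetic behind the "340 notes each with 170 notes overlap" print-out can be checked with a small standalone sketch. It assumes SEQ_LEN = 1024, which is implied by the input[:1022] slice (1020 note tokens plus the two wrapper tokens 14624/14625); the helper name make_chunks is hypothetical and not part of app.py.

SEQ_LEN = 1024          # assumed model context length (implied by input[:1022])
START_TOKEN = 14624     # chunk wrapper tokens taken from the diff above
END_TOKEN = 14625

def make_chunks(seq):
    # Mirrors the chunking loop in ClassifyMIDI: each note contributes three
    # tokens [dtime, dur+128, ptc+256], so 1020 tokens = 340 notes per chunk
    # and a 510-token stride = 170 overlapping notes between adjacent chunks.
    chunk_len = SEQ_LEN - 4
    stride = (SEQ_LEN - 4) // 2
    chunks = []
    for i in range(0, len(seq) - SEQ_LEN - 4, stride):
        chunks.append([START_TOKEN] + seq[i:i + chunk_len] + [END_TOKEN])
    return chunks

# Example: a 1000-note piece gives a 3000-token seq, so chunks start at token
# offsets 0, 510, 1020, 1530 (notes 0-340, 170-510, 340-680, 510-850).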
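
The classification itself is a two-level majority vote: for each chunk the model generates a single class token for number_of_batches identical copies of the input, the batch mode becomes that chunk's label, and the mode across chunks becomes the final label (tokens map to text via classifier_labels[0][token - 384]). Below is a minimal sketch of just the voting step, with the model call factored out; classify_votes and chunk_predictions are illustrative names, not from app.py.

import statistics

def classify_votes(chunk_predictions, classifier_labels):
    # chunk_predictions: one list of generated class tokens per chunk,
    # i.e. the `output` batch produced for each chunk in the loop above.
    per_chunk = [statistics.mode(batch) for batch in chunk_predictions]   # vote inside each chunk's batch
    final = statistics.mode(per_chunk)                                    # vote across chunks
    labels = [classifier_labels[0][tok - 384] for tok in per_chunk]       # token -> label text
    ratio = per_chunk.count(final) / len(per_chunk)                       # agreement ratio the app prints
    return classifier_labels[0][final - 384], ratio, labels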