import os
import re
import time as reqtime
import datetime
from pytz import timezone

import torch

import spaces
import gradio as gr

from x_transformer_1_23_2 import *
import random
from statistics import mode
import tqdm

from midi_to_colab_audio import midi_to_colab_audio
import TMIDIX

import matplotlib.pyplot as plt

in_space = os.getenv("SYSTEM") == "spaces"
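
# Advanced MIDI Classifier Gradio app: tokenizes an uploaded MIDI file, classifies it
# with the Ultimate MIDI Classifier transformer checkpoint, and returns the best-matching
# title, a classification summary, a rendered audio preview, a score plot and a
# reference MIDI file.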


@spaces.GPU
def ClassifyMIDI(input_midi):

    print('=' * 70)
    print('Req start time: {:%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.now(PDT)))
    start_time = reqtime.time()

    print('=' * 70)

    fn = os.path.basename(input_midi.name)
    fn1 = fn.split('.')[0]

    print('=' * 70)
    print('Ultimate MIDI Classifier')
    print('=' * 70)

    print('Input MIDI file name:', fn)

    print('=' * 70)
    print('Loading MIDI file...')

    midi_name = fn

    # Read the MIDI file and convert it to a single-track ms-resolution score
    raw_score = TMIDIX.midi2single_track_ms_score(open(input_midi.name, 'rb').read())

    escore_notes = TMIDIX.advanced_score_processor(raw_score, return_enhanced_score_notes=True)[0]

    escore_notes = TMIDIX.augment_enhanced_score_notes(escore_notes, timings_divider=32)

    # Keep only notes with MIDI patches below 80 plus drums (pseudo-patch 128)
    escore_notes = [e for e in escore_notes if e[6] < 80 or e[6] == 128]
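
    # The loop below encodes each remaining note as a triplet of tokens:
    #   delta_time  (0..127)    - start-time delta from the previous note (timings divided by 32 above)
    #   dur + 128   (129..255)  - note duration, clipped to 1..127
    #   ptc + 256   (257..511)  - MIDI pitch, with +128 added first for drum notes (patch 128)
    # Repeated pitches within the same time step (chord) are skipped.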

    melody_chords = []

    pe = escore_notes[0]

    pitches = []
    notes_counter = 0

    for e in escore_notes:

        delta_time = max(0, min(127, e[1]-pe[1]))

        # A new time step starts a new chord, so reset the seen-pitches list
        if delta_time != 0:
            pitches = []

        dur = max(1, min(127, e[2]))

        pat = max(0, min(128, e[6]))

        if pat == 128:
            ptc = max(1, min(127, e[4]))+128
        else:
            ptc = max(1, min(127, e[4]))

        # Skip duplicate pitches within the same chord
        if ptc not in pitches:
            melody_chords.extend([delta_time, dur+128, ptc+256])
            pitches.append(ptc)
            notes_counter += 1

        pe = e

    print('Done!')
    print('=' * 70)

    print('Sampling score...')

    chunk_size = 1020  # 340 notes * 3 tokens per note
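
    # NOTE: classification_sampling_resolution is used below but is not defined in this
    # section. The value 4 (a hop of 1020 // 4 = 255 tokens, i.e. 85 notes) is assumed
    # here because the per-chunk report further down steps in 85-note increments.
    classification_sampling_resolution = 4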

    score = melody_chords

    input_data = []

    for i in range(0, len(score)-chunk_size, chunk_size // classification_sampling_resolution):
        schunk = score[i:i+chunk_size]

        if len(schunk) == chunk_size:

            # Wrap each chunk with the special start (937) and end (938) tokens
            td = [937]
            td.extend(schunk)
            td.extend([938])

            input_data.append(td)

    print('Done!')
    print('=' * 70)

    classification_summary_string = '=' * 70
    classification_summary_string += '\n'

    print('Composition has', notes_counter, 'notes')
    print('=' * 70)
    print('Composition was split into', len(input_data), 'samples of 340 notes each with', 340 - chunk_size // classification_sampling_resolution // 3, 'notes overlap')
    print('=' * 70)
    print('Number of notes in all composition samples:', len(input_data) * 340)
    print('=' * 70)

    classification_summary_string += 'Composition has ' + str(notes_counter) + ' notes\n'
    classification_summary_string += '=' * 70
    classification_summary_string += '\n'
    classification_summary_string += 'Composition was split into ' + str(len(input_data)) + ' samples of 340 notes each with ' + str(340 - chunk_size // classification_sampling_resolution // 3) + ' notes overlap\n'
    classification_summary_string += 'Number of notes in all composition samples: ' + str(len(input_data) * 340) + '\n'
    classification_summary_string += '=' * 70
    classification_summary_string += '\n'

    print('Loading model...')

    SEQ_LEN = 1026
    PAD_IDX = 940
    DEVICE = 'cuda'

    model = TransformerWrapper(
        num_tokens = PAD_IDX+1,
        max_seq_len = SEQ_LEN,
        attn_layers = Decoder(dim = 1024, depth = 24, heads = 32, attn_flash = True)
    )

    model = AutoregressiveWrapper(model, ignore_index=PAD_IDX, pad_value=PAD_IDX)

    model = torch.nn.DataParallel(model)

    model.to(DEVICE)
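
    # The model is wrapped in torch.nn.DataParallel, so inference below goes through
    # model.module.generate(...) rather than model.generate(...)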

    print('=' * 70)

    print('Loading model checkpoint...')

    model.load_state_dict(
        torch.load('Ultimate_MIDI_Classifier_Trained_Model_29886_steps_0.556_loss_0.8339_acc.pth',
                   map_location=DEVICE))

    print('=' * 70)

    dtype = torch.bfloat16  # bfloat16 autocast is used for both CPU and GPU devices

    ctx = torch.amp.autocast(device_type=DEVICE, dtype=dtype)

    print('Done!')
    print('=' * 70)

    print('=' * 70)
    print('Ultimate MIDI Classifier')
    print('=' * 70)
    print('Classifying...')

    torch.cuda.empty_cache()

    model.eval()

    artist_results = []
    song_results = []

    results = []

    for input_seq in input_data:

        x = torch.tensor(input_seq[:1022], dtype=torch.long, device=DEVICE)

        # Greedily generate the two classification label tokens for this chunk (top_k with k=1)
        with ctx:
            out = model.module.generate(x,
                                        2,
                                        filter_logits_fn=top_k,
                                        filter_kwargs={'k': 1},
                                        temperature=0.9,
                                        return_prime=False,
                                        verbose=False)

        result = tuple(out[0].tolist())

        results.append(result)
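
    # The composition-level label is a majority vote: the most common per-chunk
    # prediction wins, and its share of all chunks is reported as the label ratio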

    all_results_labels = [classifier_labels[0][r-384] for r in results]
    final_result = mode(results)

    print('Done!')
    print('=' * 70)

    print('Most common classification label:', classifier_labels[0][final_result-384])
    print('Most common classification label ratio:', results.count(final_result) / len(results))
    print('Most common classification label index:', final_result)
    print('=' * 70)

    classification_summary_string += 'Most common classification label: ' + str(classifier_labels[0][final_result-384]) + '\n'
    classification_summary_string += 'Most common classification label ratio: ' + str(results.count(final_result) / len(results)) + '\n'
    classification_summary_string += 'Most common classification label index: ' + str(final_result) + '\n'
    classification_summary_string += '=' * 70
    classification_summary_string += '\n'

    print('All classification labels summary:')
    print('=' * 70)

    for i, a in enumerate(all_results_labels):
        print('Notes', i*85, '-', (i*85)+340, '===', a)
        classification_summary_string += 'Notes ' + str(i*85) + ' - ' + str((i*85)+340) + ' === ' + str(a) + '\n'

    classification_summary_string += '=' * 70
    classification_summary_string += '\n'

    print('=' * 70)
    print('Done!')
    print('=' * 70)

    print('Rendering results...')
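
    # NOTE: processed_scores_labels and processed_scores are referenced below but are not
    # defined in this section; they are assumed to be module-level collections of
    # pre-processed reference scores keyed by the same classification labels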

    # Pick the pre-processed reference score that matches the predicted label
    score_idx = processed_scores_labels.index(classifier_labels[0][final_result-384])

    output_score = processed_scores[score_idx][1][:6000]

    print('=' * 70)
    print('Sample INTs', results[:15])
    print('=' * 70)

    fn1 = processed_scores[score_idx][0]

    output_score = TMIDIX.recalculate_score_timings(output_score)

    output_score, patches, overflow_patches = TMIDIX.patch_enhanced_score_notes(output_score)

    detailed_stats = TMIDIX.Tegridy_ms_SONG_to_MIDI_Converter(output_score,
                                                              output_signature = 'Advanced MIDI Classifier',
                                                              output_file_name = fn1,
                                                              track_name='Project Los Angeles',
                                                              list_of_MIDI_patches=patches,
                                                              timings_multiplier=16
                                                              )

    new_fn = fn1+'.mid'

    audio = midi_to_colab_audio(new_fn,
                                soundfont_path=soundfont,
                                sample_rate=16000,
                                volume_scale=10,
                                output_for_gradio=True
                                )

    print('Done!')
    print('=' * 70)

    output_midi_title = str(fn1)
    output_midi_summary = classification_summary_string
    output_midi = str(new_fn)
    output_audio = (16000, audio)

    output_plot = TMIDIX.plot_ms_SONG(output_score, plot_title=output_midi, return_plt=True, timings_multiplier=16)

    print('Output MIDI file name:', output_midi)
    print('Output MIDI title:', output_midi_title)
    print('=' * 70)

    print('-' * 70)
    print('Req end time: {:%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.now(PDT)))
    print('-' * 70)
    print('Req execution time:', (reqtime.time() - start_time), 'sec')

    return output_midi_title, output_midi_summary, output_midi, output_audio, output_plot


if __name__ == "__main__":

    PDT = timezone('US/Pacific')

    print('=' * 70)
    print('App start time: {:%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.now(PDT)))
    print('=' * 70)

    soundfont = "SGM-v2.01-YamahaGrand-Guit-Bass-v2.7.sf2"

    def str_strip_song(string):
        if string is not None:
            string = string.replace('-', ' ').replace('_', ' ').replace('=', ' ')
            str1 = re.compile('[^a-zA-Z ]').sub('', string)
            return re.sub(' +', ' ', str1).strip().title()
        else:
            return ''

    def str_strip_artist(string):
        if string is not None:
            string = string.replace('-', ' ').replace('_', ' ').replace('=', ' ')
            str1 = re.compile('[^0-9a-zA-Z ]').sub('', string)
            return re.sub(' +', ' ', str1).strip().title()
        else:
            return ''
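
    # Illustrative examples with hypothetical inputs (not from the dataset):
    #   str_strip_song('Hotel-California_1976')   -> 'Hotel California'  (digits dropped)
    #   str_strip_artist('The_B-52s')             -> 'The B 52S'         (digits kept)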

    def song_artist_to_song_artist_tokens(file_name):
        idx = classifier_labels.index(file_name)

        tok1 = idx // 424
        tok2 = idx % 424

        return [tok1, tok2]

    def song_artist_tokens_to_song_artist(file_name_tokens):

        tok1 = file_name_tokens[0]
        tok2 = file_name_tokens[1]

        idx = (tok1 * 424) + tok2

        return classifier_labels[idx]
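
    # Round-trip sketch for the two helpers above, assuming classifier_labels is a flat
    # list of song/artist label strings (its exact contents are not shown in this section):
    #   toks = song_artist_to_song_artist_tokens(classifier_labels[1000])  # -> [2, 152]
    #   song_artist_tokens_to_song_artist(toks)                            # -> classifier_labels[1000]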

    print('=' * 70)
    print('Loading Ultimate MIDI Classifier labels...')
    print('=' * 70)
    classifier_labels = TMIDIX.Tegridy_Any_Pickle_File_Reader('Ultimate-MIDI-Classifier/Data/Ultimate_MIDI_Classifier_Song_Artist_Labels')
    print('=' * 70)
    genre_labels = TMIDIX.Tegridy_Any_Pickle_File_Reader('Ultimate-MIDI-Classifier/Data/Ultimate_MIDI_Classifier_Music_Genre_Labels')
    genre_labels_fnames = [f[0] for f in genre_labels]
    print('=' * 70)
    print('Done!')
    print('=' * 70)

    app = gr.Blocks()

    with app:
        gr.Markdown("<h1 style='text-align: center; margin-bottom: 1rem'>Advanced MIDI Classifier</h1>")
        gr.Markdown("<h1 style='text-align: center; margin-bottom: 1rem'>Detailed MIDI classification with transformers</h1>")
        gr.Markdown(
            "![Visitors](https://api.visitorbadge.io/api/visitors?path=asigalov61.Advanced-MIDI-Classifier&style=flat)\n\n"
            "This is a demo for the Annotated MIDI Dataset\n\n"
            "Check out the [Annotated MIDI Dataset](https://huggingface.co/datasets/asigalov61/Annotated-MIDI-Dataset) on Hugging Face!\n\n"
        )

        input_midi = gr.File(label="Input MIDI", file_types=[".midi", ".mid", ".kar"])

        run_btn = gr.Button("Classify", variant="primary")

        gr.Markdown("## Classification results")

        output_midi_title = gr.Textbox(label="Best classification match MIDI title")
        output_midi_summary = gr.Textbox(label="MIDI classification summary")
        output_audio = gr.Audio(label="Best classification match MIDI audio", format="wav", elem_id="midi_audio")
        output_plot = gr.Plot(label="Best classification match MIDI score plot")
        output_midi = gr.File(label="Best classification match MIDI file", file_types=[".mid"])

        run_event = run_btn.click(ClassifyMIDI, [input_midi],
                                  [output_midi_title, output_midi_summary, output_midi, output_audio, output_plot])

    app.queue().launch()