import os
import time as reqtime
import datetime
from statistics import mode  # used below to pick the most common artist label
from pytz import timezone

import gradio as gr

import numpy as np

import random
from collections import Counter

import TMIDIX

from midi_to_colab_audio import midi_to_colab_audio


def Generate_Chords_Progression(total_song_length_in_chords_chunks,
                                chords_chunks_memory_length,
                                chord_time_step,
                                melody_MIDI_patch_number,
                                chords_progression_MIDI_patch_number,
                                base_MIDI_patch_number
                                ):
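
    # NOTE: several names used below (fn, notes_counter, input_data, chunk_size,
    # input_sampling_resolution, results, final_result, song_f, patches, soundfont,
    # genre_labels, genre_labels_fnames, song_artist_tokens_to_song_artist,
    # str_strip_artist) are assumed to be defined elsewhere in the full script;
    # they are not created in this excerpt.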

    print('=' * 70)
    print('Req start time: {:%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.now(PDT)))
    start_time = reqtime.time()

    print('=' * 70)

    print('=' * 70)
    print('Ultimate MIDI Classifier')
    print('=' * 70)

    print('Input MIDI file name:', fn)

    print('=' * 70)

    print('Done!')
    print('=' * 70)
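
    # Accumulate a plain-text report that is returned for the summary textbox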
    classification_summary_string = '=' * 70
    classification_summary_string += '\n'

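    # Overlap (in notes) between consecutive 340-note samples; 'chunk_size' and
    # 'input_sampling_resolution' are assumed to be defined in the full script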
    samples_overlap = 340 - chunk_size // input_sampling_resolution // 3

    print('Composition has', notes_counter, 'notes')
    print('=' * 70)
    print('Composition was split into', len(input_data), 'samples', 'of 340 notes each with', samples_overlap, 'notes overlap')
    print('=' * 70)
    print('Number of notes in all composition samples:', len(input_data) * 340)
    print('=' * 70)

    classification_summary_string += 'Composition has ' + str(notes_counter) + ' notes\n'
    classification_summary_string += '=' * 70
    classification_summary_string += '\n'
    classification_summary_string += 'Composition was split into ' + str(len(input_data)) + ' samples of 340 notes each with ' + str(samples_overlap) + ' notes overlap\n'
    classification_summary_string += 'Number of notes in all composition samples: ' + str(len(input_data) * 340) + '\n'
    classification_summary_string += '=' * 70
    classification_summary_string += '\n'

    print('=' * 70)
    print('Ultimate MIDI Classifier')
    print('=' * 70)

    print('=' * 70)
    print('Done!')
    print('=' * 70)
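
    # 'results' / 'final_result' are expected to come from the classification step
    # (not shown in this excerpt); the 512 offset is assumed to convert label
    # tokens back into label indices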
    result_toks = [final_result[0]-512, final_result[1]-512]
    mc_song_artist = song_artist_tokens_to_song_artist(result_toks)
    gidx = genre_labels_fnames.index(mc_song_artist)
    mc_genre = genre_labels[gidx][1]

    print('Most common classification genre label:', mc_genre)
    print('Most common classification song-artist label:', mc_song_artist)
    print('Most common song-artist classification label ratio:', results.count(final_result) / len(results))
    print('=' * 70)

    classification_summary_string += 'Most common classification genre label: ' + str(mc_genre) + '\n'
    classification_summary_string += 'Most common classification song-artist label: ' + str(mc_song_artist) + '\n'
    classification_summary_string += 'Most common song-artist classification label ratio: ' + str(results.count(final_result) / len(results)) + '\n'
    classification_summary_string += '=' * 70
    classification_summary_string += '\n'

    print('All classification labels summary:')
    print('=' * 70)

    all_artists_labels = []

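    # Report the classification for every overlapping 340-note sample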
    for i, res in enumerate(results):
        result_toks = [res[0]-512, res[1]-512]
        song_artist = song_artist_tokens_to_song_artist(result_toks)
        gidx = genre_labels_fnames.index(song_artist)
        genre = genre_labels[gidx][1]

        print('Notes', i*(340-samples_overlap), '-', (i*(340-samples_overlap))+340, '===', genre, '---', song_artist)
        classification_summary_string += 'Notes ' + str(i*(340-samples_overlap)) + ' - ' + str((i*(340-samples_overlap))+340) + ' === ' + str(genre) + ' --- ' + str(song_artist) + '\n'

        artist_label = str_strip_artist(song_artist.split(' --- ')[1])

        all_artists_labels.append(artist_label)

    classification_summary_string += '=' * 70
    classification_summary_string += '\n'
    print('=' * 70)
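
    # Aggregate the per-sample artist labels with statistics.mode (most common label)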
    mode_artist_label = mode(all_artists_labels)
    mode_artist_label_count = all_artists_labels.count(mode_artist_label)

    print('Aggregated artist classification label:', mode_artist_label)
    print('Aggregated artist classification label ratio:', mode_artist_label_count / len(all_artists_labels))
    classification_summary_string += 'Aggregated artist classification label: ' + str(mode_artist_label) + '\n'
    classification_summary_string += 'Aggregated artist classification label ratio: ' + str(mode_artist_label_count / len(all_artists_labels)) + '\n'
    classification_summary_string += '=' * 70
    classification_summary_string += '\n'

    print('=' * 70)
    print('Done!')
    print('=' * 70)

    print('Rendering results...')
    print('=' * 70)

    fn1 = "Melody2Song-Seq2Seq-Music-Transformer-Composition"
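
    # Convert 'song_f' (expected to be built earlier in the full script) to a
    # standard MIDI file via TMIDIX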
    detailed_stats = TMIDIX.Tegridy_ms_SONG_to_MIDI_Converter(song_f,
                                                              output_signature = 'Melody2Song Seq2Seq Music Transformer',
                                                              output_file_name = fn1,
                                                              track_name='Project Los Angeles',
                                                              list_of_MIDI_patches=patches
                                                              )

    new_fn = fn1 + '.mid'
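
    # Render the MIDI file to audio for the Gradio player; 'soundfont' is assumed
    # to hold the path to an SF2 soundfont available to the app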
    audio = midi_to_colab_audio(new_fn,
                                soundfont_path=soundfont,
                                sample_rate=16000,
                                volume_scale=10,
                                output_for_gradio=True
                                )

    print('Done!')
    print('=' * 70)
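
    # Assemble the Gradio outputs: title, summary, MIDI file path, audio tuple and score plot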
    output_midi_title = str(fn1)
    output_midi_summary = str(song_f[:3])
    output_midi = str(new_fn)
    output_audio = (16000, audio)

    output_plot = TMIDIX.plot_ms_SONG(song_f, plot_title=output_midi, return_plt=True)

    print('-' * 70)
    print('Req end time: {:%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.now(PDT)))
    print('-' * 70)
    print('Req execution time:', (reqtime.time() - start_time), 'sec')

    return output_audio, output_plot, output_midi, classification_summary_string


if __name__ == "__main__":

    PDT = timezone('US/Pacific')

    print('=' * 70)
    print('App start time: {:%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.now(PDT)))
    print('=' * 70)

    print('=' * 70)
    print('Loading pitches chords progressions data...')
    print('=' * 70)
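    # Precomputed pitches/chords progressions data (pickle) used by the generator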
    good_chords_chunks = TMIDIX.Tegridy_Any_Pickle_File_Reader('pitches_chords_progressions_5_3_15')

    print('=' * 70)
    print('Done!')
    print('=' * 70)

    app = gr.Blocks()
    with app:
        gr.Markdown("<h1 style='text-align: center; margin-bottom: 1rem'>Chords Progressions Generator</h1>")
        gr.Markdown("<h1 style='text-align: center; margin-bottom: 1rem'>Generate unique chords progressions</h1>")
        gr.Markdown(
            "![Visitors](https://api.visitorbadge.io/api/visitors?path=asigalov61.Chords-Progressions-Generator&style=flat)\n\n"
            "This is a demo for the Tegridy MIDI Dataset\n\n"
            "Check out [Tegridy MIDI Dataset](https://github.com/asigalov61/Tegridy-MIDI-Dataset) on GitHub!\n\n"
            "[Open In Colab]"
            "(https://colab.research.google.com/github/asigalov61/Tegridy-MIDI-Dataset/blob/master/Chords-Progressions/Pitches_Chords_Progressions_Generator.ipynb)"
            " for all options, faster execution and endless generation"
        )

        gr.Markdown("## Select generation options")

        total_song_length_in_chords_chunks = gr.Slider(5, 20, value=13, step=1, label="Total song length in chords chunks")
        chords_chunks_memory_length = gr.Slider(-1, 30, value=-1, step=1, label="Chords chunks memory length")
        chord_time_step = gr.Slider(100, 1000, value=500, step=50, label="Chord time step")
        melody_MIDI_patch_number = gr.Slider(0, 127, value=40, step=1, label="Melody MIDI patch number")
        chords_progression_MIDI_patch_number = gr.Slider(0, 127, value=0, step=1, label="Chords progression MIDI patch number")
        base_MIDI_patch_number = gr.Slider(0, 127, value=35, step=1, label="Base MIDI patch number")

        run_btn = gr.Button("Generate", variant="primary")

        gr.Markdown("## Generation results")

        output_audio = gr.Audio(label="Output MIDI audio", format="wav", elem_id="midi_audio")
        output_plot = gr.Plot(label="Output MIDI score plot")
        output_midi = gr.File(label="Output MIDI file", file_types=[".mid"])

        output_cp_summary = gr.Textbox(label="Generated chords progression info and stats")

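        # Wire the Generate button to the generator function; its four return values
        # feed the audio, plot, MIDI file and summary components above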
        run_event = run_btn.click(Generate_Chords_Progression,
                                  [total_song_length_in_chords_chunks,
                                   chords_chunks_memory_length,
                                   chord_time_step,
                                   melody_MIDI_patch_number,
                                   chords_progression_MIDI_patch_number,
                                   base_MIDI_patch_number],
                                  [output_audio, output_plot, output_midi, output_cp_summary]
                                  )

    app.queue().launch()