|
|
|
"""Pitch Detection with SPICE |
|
|
|
Automatically generated by Colaboratory. |
|
|
|
Original file is located at |
|
https://colab.research.google.com/github/tensorflow/hub/blob/master/examples/colab/spice.ipynb |
|
|
|
##### Copyright 2020 The TensorFlow Hub Authors. |
|
|
|
Licensed under the Apache License, Version 2.0 (the "License"); |
|
""" |
|
|
|
|
"""<table class="tfo-notebook-buttons" align="left"> |
|
<td> |
|
<a target="_blank" href="https://www.tensorflow.org/hub/tutorials/spice"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> |
|
</td> |
|
<td> |
|
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/hub/blob/master/examples/colab/spice.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> |
|
</td> |
|
<td> |
|
<a target="_blank" href="https://github.com/tensorflow/hub/blob/master/examples/colab/spice.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View on GitHub</a> |
|
</td> |
|
<td> |
|
<a href="https://storage.googleapis.com/tensorflow_docs/hub/examples/colab/spice.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> |
|
</td> |
|
<td> |
|
<a href="https://tfhub.dev/google/spice/2"><img src="https://www.tensorflow.org/images/hub_logo_32px.png" />See TF Hub model</a> |
|
</td> |
|
</table> |
|
|
|
# Pitch Detection with SPICE |
|
|
|
This colab will show you how to use the SPICE model downloaded from TensorFlow Hub. |
|
""" |
|
|
|
!sudo apt-get install -q -y timidity libsndfile1 |
|
|
|
|
|
!pip install pydub numba==0.48 librosa music21 |
|
|
|
import tensorflow as tf |
|
import tensorflow_hub as hub |
|
|
|
import numpy as np |
|
import matplotlib.pyplot as plt |
|
import librosa |
|
from librosa import display as librosadisplay |
|
|
|
import logging |
|
import math |
|
import statistics |
|
import sys |
|
|
|
from IPython.display import Audio, Javascript |
|
from scipy.io import wavfile |
|
|
|
from base64 import b64decode |
|
|
|
import music21 |
|
from pydub import AudioSegment |
|
|
|
logger = logging.getLogger() |
|
logger.setLevel(logging.ERROR) |
|
|
|
print("tensorflow: %s" % tf.__version__) |
|
|
|
|
|
"""# The audio input file |
|
Now the hardest part: Record your singing! :) |
|
|
|
We provide four methods to obtain an audio file: |
|
|
|
1. Record audio directly in colab |
|
2. Upload from your computer |
|
3. Use a file saved on Google Drive |
|
4. Download the file from the web |
|
|
|
Choose one of the four methods below. |
|
""" |
|
|
|
|
|
|
|
RECORD = """ |
|
const sleep = time => new Promise(resolve => setTimeout(resolve, time)) |
|
const b2text = blob => new Promise(resolve => { |
|
const reader = new FileReader() |
|
reader.onloadend = e => resolve(e.srcElement.result) |
|
reader.readAsDataURL(blob) |
|
}) |
|
var record = time => new Promise(async resolve => { |
|
stream = await navigator.mediaDevices.getUserMedia({ audio: true }) |
|
recorder = new MediaRecorder(stream) |
|
chunks = [] |
|
recorder.ondataavailable = e => chunks.push(e.data) |
|
recorder.start() |
|
await sleep(time) |
|
recorder.onstop = async ()=>{ |
|
blob = new Blob(chunks) |
|
text = await b2text(blob) |
|
resolve(text) |
|
} |
|
recorder.stop() |
|
}) |
|
""" |
|
|
|
def record(sec=5):
  # Records `sec` seconds of audio via the browser and saves it to a file.
  try:
    from google.colab import output
  except ImportError:
    print('Could not import output from google.colab')
    return ''
  else:
    print('Recording')
    display(Javascript(RECORD))
    s = output.eval_js('record(%d)' % (sec * 1000))
    fname = 'recorded_audio.wav'
    print('Saving to', fname)
    b = b64decode(s.split(',')[1])
    with open(fname, 'wb') as f:
      f.write(b)
    return fname
|
|
|
|
|
INPUT_SOURCE = 'https://storage.googleapis.com/download.tensorflow.org/data/c-scale-metronome.wav' |
|
|
|
print('You selected', INPUT_SOURCE) |
|
|
|
if INPUT_SOURCE == 'RECORD':
  uploaded_file_name = record(5)
elif INPUT_SOURCE == 'UPLOAD':
  try:
    from google.colab import files
  except ImportError:
    print("ImportError: google.colab does not seem to be available")
  else:
    uploaded = files.upload()
    for fn in uploaded.keys():
      print('User uploaded file "{name}" with length {length} bytes'.format(
          name=fn, length=len(uploaded[fn])))
    uploaded_file_name = next(iter(uploaded))
    print('Uploaded file: ' + uploaded_file_name)
elif INPUT_SOURCE.startswith('./drive/'):
  try:
    from google.colab import drive
  except ImportError:
    print("ImportError: google.colab does not seem to be available")
  else:
    drive.mount('/content/drive')
    # 'YOUR_MUSIC_FILE.wav' is just an example name; INPUT_SOURCE should point
    # at the actual file under ./drive/.
    gdrive_audio_file = 'YOUR_MUSIC_FILE.wav'
    uploaded_file_name = INPUT_SOURCE
elif INPUT_SOURCE.startswith('http'):
  # Download the file referenced by INPUT_SOURCE.
  !wget --no-check-certificate $INPUT_SOURCE -O c-scale.wav
  uploaded_file_name = 'c-scale.wav'
else:
  print('Unrecognized input format!')
  print('Please select "RECORD", "UPLOAD", or specify a file hosted on '
        'Google Drive or a URL of a file to download.')
|
|
|
"""# Preparing the audio data |
|
|
|
Now we have the audio, let's convert it to the expected format and then listen to it! |
|
|
|
The SPICE model needs as input an audio file at a sampling rate of 16kHz and with only one channel (mono). |
|
|
|
To help you with this part, we created a function (`convert_audio_for_model`) to convert any wav file you have to the model's expected format: |
|
""" |
|
|
|
|
|
|
|
|
|
EXPECTED_SAMPLE_RATE = 16000 |
|
|
|
def convert_audio_for_model(user_file, output_file='converted_audio_file.wav'):
  # pydub decodes the input (via ffmpeg for non-wav formats), then resamples
  # to 16 kHz and downmixes to mono.
  audio = AudioSegment.from_file(user_file)
  audio = audio.set_frame_rate(EXPECTED_SAMPLE_RATE).set_channels(1)
  audio.export(output_file, format="wav")
  return output_file
|
|
|
|
|
|
|
|
|
converted_audio_file = convert_audio_for_model(uploaded_file_name) |
|
|
|
|
|
sample_rate, audio_samples = wavfile.read(converted_audio_file)
|
|
|
|
|
duration = len(audio_samples)/sample_rate |
|
print(f'Sample rate: {sample_rate} Hz') |
|
print(f'Total duration: {duration:.2f}s') |
|
print(f'Size of the input: {len(audio_samples)}') |
|
|
|
|
|
Audio(audio_samples, rate=sample_rate) |
|
|
|
"""First thing, let's take a look at the waveform of our singing.""" |
|
|
|
|
|
_ = plt.plot(audio_samples) |
|
|
|
"""A more informative visualization is the [spectrogram](https://en.wikipedia.org/wiki/Spectrogram), which shows frequencies present over time. |
|
|
|
Here, we use a logarithmic frequency scale, to make the singing more clearly visible. |
|
|
|
""" |
|
|
|
MAX_ABS_INT16 = 32768.0  # int16 samples span [-32768, 32767].
|
|
|
def plot_stft(x, sample_rate, show_black_and_white=False):
  x_stft = np.abs(librosa.stft(x, n_fft=2048))
  fig, ax = plt.subplots()
  fig.set_size_inches(20, 10)
  x_stft_db = librosa.amplitude_to_db(x_stft, ref=np.max)
  if show_black_and_white:
    librosadisplay.specshow(data=x_stft_db, y_axis='log',
                            sr=sample_rate, cmap='gray_r')
  else:
    librosadisplay.specshow(data=x_stft_db, y_axis='log', sr=sample_rate)

  plt.colorbar(format='%+2.0f dB')
|
|
|
plot_stft(audio_samples / MAX_ABS_INT16, sample_rate=EXPECTED_SAMPLE_RATE)
|
plt.show() |
|
|
|
"""We need one last conversion here. The audio samples are in int16 format. They need to be normalized to floats between -1 and 1.""" |
|
|
|
audio_samples = audio_samples / float(MAX_ABS_INT16) |
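"""As a quick, optional sanity check, the normalized samples should now lie within [-1, 1]:"""

print('min: %.3f, max: %.3f' % (audio_samples.min(), audio_samples.max()))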
|
|
|
"""# Executing the Model |
|
Now is the easy part, let's load the model with **TensorFlow Hub**, and feed the audio to it. |
|
SPICE will give us two outputs: pitch and uncertainty |
|
|
|
**TensorFlow Hub** is a library for the publication, discovery, and consumption of reusable parts of machine learning models. It makes easy to use machine learning to solve your challenges. |
|
|
|
To load the model you just need the Hub module and the URL pointing to the model: |
|
""" |
|
|
|
|
|
model = hub.load("https://tfhub.dev/google/spice/2") |
|
|
|
"""**Note:** An interesting detail here is that all the model urls from Hub can be used for download and also to read the documentation, so if you point your browser to that link you can read documentation on how to use the model and learn more about how it was trained. |
|
|
|
With the model loaded, data prepared, we need 3 lines to get the result: |
|
""" |
|
|
|
|
|
model_output = model.signatures["serving_default"](tf.constant(audio_samples, tf.float32)) |
|
|
|
pitch_outputs = model_output["pitch"] |
|
uncertainty_outputs = model_output["uncertainty"] |
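"""Optionally, we can inspect the outputs' shapes: the model returns one pitch value and one uncertainty value per analysis frame, so both tensors have the same length:"""

print(pitch_outputs.shape, uncertainty_outputs.shape)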
|
|
|
|
|
confidence_outputs = 1.0 - uncertainty_outputs |
|
|
|
fig, ax = plt.subplots() |
|
fig.set_size_inches(20, 10) |
|
plt.plot(pitch_outputs, label='pitch') |
|
plt.plot(confidence_outputs, label='confidence') |
|
plt.legend(loc="lower right") |
|
plt.show() |
|
|
|
"""Let's make the results easier to understand by removing all pitch estimates with low confidence (confidence < 0.9) and plot the remaining ones. |
|
|
|
|
|
""" |
|
|
|
confidence_outputs = list(confidence_outputs)
pitch_outputs = [float(x) for x in pitch_outputs]

indices = range(len(pitch_outputs))
confident_pitch_outputs = [
    (i, p)
    for i, p, c in zip(indices, pitch_outputs, confidence_outputs)
    if c >= 0.9
]
confident_pitch_outputs_x, confident_pitch_outputs_y = zip(*confident_pitch_outputs)
|
|
|
fig, ax = plt.subplots() |
|
fig.set_size_inches(20, 10) |
|
ax.set_ylim([0, 1]) |
|
plt.scatter(confident_pitch_outputs_x, confident_pitch_outputs_y, c="r")
|
|
|
plt.show() |
|
|
|
"""The pitch values returned by SPICE are in the range from 0 to 1. Let's convert them to absolute pitch values in Hz.""" |
|
|
|
def output2hz(pitch_output):
  # Calibration constants for converting the model's output to Hz
  # (see the model documentation at https://tfhub.dev/google/spice/2).
  PT_OFFSET = 25.58
  PT_SLOPE = 63.07
  FMIN = 10.0
  BINS_PER_OCTAVE = 12.0
  cqt_bin = pitch_output * PT_SLOPE + PT_OFFSET
  return FMIN * 2.0 ** (1.0 * cqt_bin / BINS_PER_OCTAVE)
|
|
|
confident_pitch_values_hz = [output2hz(p) for p in confident_pitch_outputs_y]
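"""As a quick sanity check (using a hypothetical model output of 0.5), the conversion lands in a plausible vocal range:"""

print(output2hz(0.5))  # 10.0 * 2 ** ((0.5 * 63.07 + 25.58) / 12.0) ≈ 271 Hz, between C4 and C#4.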
|
|
|
"""Now, let's see how good the prediction is: We will overlay the predicted pitches over the original spectrogram. To make the pitch predictions more visible, we changed the spectrogram to black and white.""" |
|
|
|
plot_stft(audio_samples / MAX_ABS_INT16,
          sample_rate=EXPECTED_SAMPLE_RATE, show_black_and_white=True)
|
|
|
|
|
plt.scatter(confident_pitch_outputs_x, confident_pitch_values_hz, c="r") |
|
|
|
plt.show() |
|
|
|
"""# Converting to musical notes |
|
|
|
Now that we have the pitch values, let's convert them to notes! |
|
This is part is challenging by itself. We have to take into account two things: |
|
1. the rests (when there's no singing) |
|
2. the size of each note (offsets) |
|
|
|
### 1: Adding zeros to the output to indicate when there's no singing |
|
""" |
|
|
|
pitch_outputs_and_rests = [ |
|
output2hz(p) if c >= 0.9 else 0 |
|
for i, p, c in zip(indices, pitch_outputs, confidence_outputs) |
|
] |
|
|
|
"""### 2: Adding note offsets |
|
|
|
When a person sings freely, the melody may have an offset to the absolute pitch values that notes can represent. |
|
Hence, to convert predictions to notes, one needs to correct for this possible offset. |
|
This is what the following code computes. |
|
""" |
|
|
|
A4 = 440
C0 = A4 * pow(2, -4.75)  # ≈ 16.35 Hz, the pitch of C0.
note_names = ["C", "C#", "D", "D#", "E", "F", "F#", "G", "G#", "A", "A#", "B"]
|
|
|
def hz2offset(freq):
  # Measures the offset (in semitones) from a perfectly tuned note.
  if freq == 0:  # Rests always have zero frequency.
    return None
  h = round(12 * math.log2(freq / C0))
  return 12 * math.log2(freq / C0) - h
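"""A quick check of the offset computation with example frequencies: A4 (440 Hz) is exactly 57 semitones above C0, so its offset is 0, while a slightly sharp 450 Hz is offset by roughly +0.39 semitones:"""

print(hz2offset(440.0))  # 0.0
print(hz2offset(450.0))  # ≈ 0.39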
|
|
|
|
|
|
|
|
|
offsets = [hz2offset(p) for p in pitch_outputs_and_rests if p != 0] |
|
print("offsets: ", offsets) |
|
|
|
ideal_offset = statistics.mean(offsets) |
|
print("ideal offset: ", ideal_offset) |
|
|
|
"""We can now use some heuristics to try and estimate the most likely sequence of notes that were sung. |
|
The ideal offset computed above is one ingredient - but we also need to know the speed (how many predictions make, say, an eighth?), and the time offset to start quantizing. To keep it simple, we'll just try different speeds and time offsets and measure the quantization error, using in the end the values that minimize this error. |
|
""" |
|
|
|
def quantize_predictions(group, ideal_offset):
  # Group values are either 0, or a pitch in Hz.
  non_zero_values = [v for v in group if v != 0]
  zero_values_count = len(group) - len(non_zero_values)

  # Treat the group as a rest if more than 80% of its values are zero.
  if zero_values_count > 0.8 * len(group):
    # Interpret as a rest. Count each dropped pitch as an error, weighted a
    # bit worse than a badly sung note (which costs at most 0.5).
    return 0.51 * len(non_zero_values), "Rest"
  else:
    # Interpret as a note, estimated as the mean of the non-rest predictions
    # rounded to the nearest semitone (after removing the ideal offset).
    h = round(
        statistics.mean([
            12 * math.log2(freq / C0) - ideal_offset for freq in non_zero_values
        ]))
    octave = h // 12
    n = h % 12
    note = note_names[n] + str(octave)
    # The quantization error is the total distance from the quantized note.
    error = sum([
        abs(12 * math.log2(freq / C0) - ideal_offset - h)
        for freq in non_zero_values
    ])
    return error, note
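"""To illustrate `quantize_predictions` (with hypothetical frequencies): a group of values near 440 Hz should quantize to "A4" with a small error, assuming an ideal offset of 0:"""

print(quantize_predictions([440.0, 441.0, 439.5], 0.0))  # ≈ (0.06, 'A4')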
|
|
|
|
|
def get_quantization_and_error(pitch_outputs_and_rests, predictions_per_eighth,
                               prediction_start_offset, ideal_offset):
  # Apply the start offset: pad the pitches with zeros (rests).
  pitch_outputs_and_rests = ([0] * prediction_start_offset +
                             pitch_outputs_and_rests)

  # Collect the predictions into groups of size predictions_per_eighth.
  groups = [
      pitch_outputs_and_rests[i:i + predictions_per_eighth]
      for i in range(0, len(pitch_outputs_and_rests), predictions_per_eighth)
  ]

  quantization_error = 0

  notes_and_rests = []
  for group in groups:
    error, note_or_rest = quantize_predictions(group, ideal_offset)
    quantization_error += error
    notes_and_rests.append(note_or_rest)

  return quantization_error, notes_and_rests
|
|
|
|
|
best_error = float("inf") |
|
best_notes_and_rests = None |
|
best_predictions_per_note = None |
|
|
|
for predictions_per_note in range(20, 65, 1): |
|
for prediction_start_offset in range(predictions_per_note): |
|
|
|
error, notes_and_rests = get_quantization_and_error( |
|
pitch_outputs_and_rests, predictions_per_note, |
|
prediction_start_offset, ideal_offset) |
|
|
|
if error < best_error: |
|
best_error = error |
|
best_notes_and_rests = notes_and_rests |
|
best_predictions_per_note = predictions_per_note |
|
|
|
|
|
|
|
# Trim the leading and trailing rests, since they carry no melody.
while best_notes_and_rests[0] == 'Rest':
  best_notes_and_rests = best_notes_and_rests[1:]

while best_notes_and_rests[-1] == 'Rest':
  best_notes_and_rests = best_notes_and_rests[:-1]
|
|
|
"""Now let's write the quantized notes as sheet music score! |
|
|
|
To do it we will use two libraries: [music21](http://web.mit.edu/music21/) and [Open Sheet Music Display](https://github.com/opensheetmusicdisplay/opensheetmusicdisplay) |
|
|
|
**Note:** for simplicity, we assume here that all notes have the same duration (a half note). |
|
""" |
|
|
|
|
|
# Create the sheet music score.
sc = music21.stream.Score()

# Derive a tempo from the best note length found above. (Together with the
# half-note durations used below, this formula implicitly assumes roughly
# 30 pitch predictions per second.)
bpm = 60 * 60 / best_predictions_per_note
print('bpm:', bpm)
a = music21.tempo.MetronomeMark(number=bpm)
sc.insert(0, a)
|
|
|
for snote in best_notes_and_rests: |
|
d = 'half' |
|
if snote == 'Rest': |
|
sc.append(music21.note.Rest(type=d)) |
|
else: |
|
sc.append(music21.note.Note(snote, type=d)) |
|
|
|
|
|
|
|
from IPython.core.display import display, HTML, Javascript
import json
|
|
|
def showScore(score): |
|
xml = open(score.write('musicxml')).read() |
|
showMusicXML(xml) |
|
|
|
def showMusicXML(xml): |
|
DIV_ID = "OSMD_div" |
|
display(HTML('<div id="'+DIV_ID+'">loading OpenSheetMusicDisplay</div>')) |
|
script = """ |
|
var div_id = {{DIV_ID}}; |
|
function loadOSMD() { |
|
return new Promise(function(resolve, reject){ |
|
if (window.opensheetmusicdisplay) { |
|
return resolve(window.opensheetmusicdisplay) |
|
} |
|
// OSMD script has a 'define' call which conflicts with requirejs |
|
var _define = window.define // save the define object |
|
window.define = undefined // now the loaded script will ignore requirejs |
|
var s = document.createElement( 'script' ); |
|
s.setAttribute( 'src', "https://cdn.jsdelivr.net/npm/opensheetmusicdisplay@0.7.6/build/opensheetmusicdisplay.min.js" ); |
|
//s.setAttribute( 'src', "/custom/opensheetmusicdisplay.js" ); |
|
s.onload=function(){ |
|
window.define = _define |
|
resolve(opensheetmusicdisplay); |
|
}; |
|
document.body.appendChild( s ); // browser will try to load the new script tag |
|
}) |
|
} |
|
loadOSMD().then((OSMD)=>{ |
|
window.openSheetMusicDisplay = new OSMD.OpenSheetMusicDisplay(div_id, { |
|
drawingParameters: "compacttight" |
|
}); |
|
openSheetMusicDisplay |
|
.load({{data}}) |
|
.then( |
|
function() { |
|
openSheetMusicDisplay.render(); |
|
} |
|
); |
|
}) |
|
""".replace('{{DIV_ID}}',DIV_ID).replace('{{data}}',json.dumps(xml)) |
|
display(Javascript(script)) |
|
return |
|
|
|
|
|
showScore(sc) |
|
print(best_notes_and_rests) |
|
|
|
"""Let's convert the music notes to a MIDI file and listen to it. |
|
|
|
To create this file, we can use the stream we created before. |
|
""" |
|
|
|
|
|
converted_audio_file_as_midi = converted_audio_file[:-4] + '.mid' |
|
fp = sc.write('midi', fp=converted_audio_file_as_midi) |
|
|
|
wav_from_created_midi = converted_audio_file_as_midi.replace(' ', '_') + "_midioutput.wav" |
|
print(wav_from_created_midi) |
|
|
|
"""To listen to it on colab, we need to convert it back to wav. An easy way of doing that is using Timidity.""" |
|
|
|
!timidity $converted_audio_file_as_midi -Ow -o $wav_from_created_midi |
|
|
|
"""And finally, listen the audio, created from notes, created via MIDI from the predicted pitches, inferred by the model! |
|
|
|
""" |
|
|
|
Audio(wav_from_created_midi) |