Add audio spectrogram transformer
Julien Simon committed • Commit bd27e22 • Parent(s): 92f8ba8

Changed files:
- app.py (+17, -14)
- requirements.txt (+2, -1)
app.py
CHANGED
@@ -1,37 +1,40 @@
 import gradio as gr
 from transformers import pipeline
 
-
+model_names = [
+    "juliensimon/wav2vec2-conformer-rel-pos-large-finetuned-speech-commands",
+    "MIT/ast-finetuned-speech-commands-v2",
+]
 
-p = pipeline("audio-classification", model=model_name)
 
-
-
+def process(file, model_name):
+    p = pipeline("audio-classification", model=model_name)
     pred = p(file)
     return {x["label"]: x["score"] for x in pred}
 
 
 # Gradio inputs
-mic = gr.
+mic = gr.Audio(source="microphone", type="filepath", label="Speech input")
+model_selection = gr.Dropdown(model_names, label="Model selection")
 
 # Gradio outputs
-labels = gr.
+labels = gr.Label(num_top_classes=3)
 
-description = "This Space showcases
+description = "This Space showcases two audio classification models fine-tuned on the speech_commands dataset:\n\n - wav2vec2-conformer: 97.2% accuracy, added in transformers 4.20.0.\n - audio-spectrogram-transformer: 98.12% accuracy, added in transformers 4.25.1.\n \n They can spot one of the following keywords: 'Yes', 'No', 'Up', 'Down', 'Left', 'Right', 'On', 'Off', 'Stop', 'Go', 'Zero', 'One', 'Two', 'Three', 'Four', 'Five', 'Six', 'Seven', 'Eight', 'Nine', 'Bed', 'Bird', 'Cat', 'Dog', 'Happy', 'House', 'Marvin', 'Sheila', 'Tree', 'Wow', 'Backward', 'Forward', 'Follow', 'Learn', 'Visual'."
 
 iface = gr.Interface(
     theme="huggingface",
     description=description,
     fn=process,
-    inputs=[mic],
+    inputs=[mic, model_selection],
     outputs=[labels],
     examples=[
-        ["backward16k.wav"],
-        ["happy16k.wav"],
-        ["marvin16k.wav"],
-        ["seven16k.wav"],
-        ["stop16k.wav"],
-        ["up16k.wav"],
+        ["backward16k.wav", "MIT/ast-finetuned-speech-commands-v2"],
+        ["happy16k.wav", "MIT/ast-finetuned-speech-commands-v2"],
+        ["marvin16k.wav", "MIT/ast-finetuned-speech-commands-v2"],
+        ["seven16k.wav", "MIT/ast-finetuned-speech-commands-v2"],
+        ["stop16k.wav", "MIT/ast-finetuned-speech-commands-v2"],
+        ["up16k.wav", "MIT/ast-finetuned-speech-commands-v2"],
     ],
     allow_flagging="never",
 )
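For reference, a minimal sketch of what the updated process() does when called outside Gradio, using the AST checkpoint added by this commit. Running it standalone, the presence of backward16k.wav in the working directory, and ffmpeg being available for audio decoding are assumptions, not part of the Space itself.

from transformers import pipeline

# Sketch: build the same audio-classification pipeline that process() builds,
# pointing it at the newly added AST checkpoint.
model_name = "MIT/ast-finetuned-speech-commands-v2"
p = pipeline("audio-classification", model=model_name)

# The pipeline takes a path to an audio file (assumed: one of the bundled
# 16 kHz examples) and returns a list of {"label": ..., "score": ...} dicts.
pred = p("backward16k.wav")
print({x["label"]: x["score"] for x in pred})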
requirements.txt
CHANGED
@@ -1,3 +1,4 @@
 torch
-
+torchaudio
+transformers>=4.25.1
 librosa
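As a quick sanity check of the updated pins, a small sketch; the version comparison below is an assumption for illustration, not part of this commit. Per the Space description, AST support requires transformers 4.25.1 or later.

# Sketch: confirm the installed transformers release is recent enough for
# MIT/ast-finetuned-speech-commands-v2 (AST support landed in 4.25.1).
import transformers
from packaging import version  # packaging is pulled in as a transformers dependency

if version.parse(transformers.__version__) < version.parse("4.25.1"):
    raise RuntimeError("transformers>=4.25.1 is required for the AST model")
print("transformers", transformers.__version__, "OK")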