Spaces:
Runtime error
Runtime error
davidscripka
committed on
Commit
•
3d4323f
1
Parent(s):
caa16ad
Initial version of openWakeWord Gradio demo
Browse files- app.py +66 -0
- requirements.txt +4 -0
app.py
ADDED
@@ -0,0 +1,66 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import collections
import json
from functools import partial

import gradio as gr
import numpy as np
import pandas as pd
import scipy.signal

from openwakeword.model import Model
|
8 |
+
|
9 |
+
# Load openWakeWord models
|
10 |
+
model = Model()
|
11 |
+
|
12 |
+
# Define function to process audio
|
13 |
+
def process_audio(audio, state=None):
    """Score a streaming microphone chunk against all loaded wake-word models.

    Parameters
    ----------
    audio : tuple
        Gradio microphone payload ``(sample_rate, samples)`` where ``samples``
        is a 1-D numpy array.
    state : collections.defaultdict or None
        Per-session rolling history of model scores (one ``deque(maxlen=60)``
        per model). ``None`` (the first call of a session) creates a fresh
        history.

    Returns
    -------
    tuple
        ``(line-plot update, state)`` — the updated score plot and the
        history to be fed back on the next streaming call.
    """
    # Create a fresh per-session history instead of using a shared mutable
    # default argument: the original default defaultdict was created once at
    # definition time and leaked scores across every user session.
    if state is None:
        state = collections.defaultdict(partial(collections.deque, maxlen=60))

    # Resample audio to the 16 kHz rate the models expect.
    if audio[0] != 16000:
        data = scipy.signal.resample(
            audio[1], int(float(audio[1].shape[0]) / audio[0] * 16000)
        )
    else:
        # Bug fix: the original left `data` undefined when the input was
        # already at 16 kHz, raising UnboundLocalError below.
        data = audio[1]

    # Get predictions on consecutive 1280-sample (80 ms) frames; a trailing
    # partial frame is skipped.
    for i in range(0, len(data), 1280):
        chunk = data[i:i + 1280]
        if len(chunk) == 1280:
            prediction = model.predict(chunk)
            for key in prediction:
                # Fill the deque with zeros if it's empty so the plot always
                # spans the full 60-frame window.
                if len(state[key]) == 0:
                    state[key].extend(np.zeros(60))

                # Add prediction
                state[key].append(prediction[key])

    # Make a line plot of the rolling score history, one series per model.
    # NOTE(review): assumes at least one frame was scored so `state` is
    # non-empty; an empty `dfs` would make pd.concat raise (same as original).
    dfs = []
    for key in state.keys():
        df = pd.DataFrame(
            {"x": np.arange(len(state[key])), "y": state[key], "Model": key}
        )
        dfs.append(df)

    df = pd.concat(dfs)
    plot = gr.LinePlot().update(
        value=df, x='x', y='y', color="Model", y_lim=(0, 1), tooltip="Model",
        width=600, height=300, x_title="Time (frames)", y_title="Model Score",
        color_legend_position="bottom",
    )

    # Manually adjust how the legend is displayed by editing the underlying
    # Vega-Lite spec (gr.LinePlot does not expose these knobs directly).
    tmp = json.loads(plot["value"]["plot"])
    tmp["layer"][0]['encoding']['color']['legend']["direction"] = "vertical"
    tmp["layer"][0]['encoding']['color']['legend']["columns"] = 4
    tmp["layer"][0]['encoding']['color']['legend']["labelFontSize"] = 12
    tmp["layer"][0]['encoding']['color']['legend']["titleFontSize"] = 14

    plot["value"]['plot'] = json.dumps(tmp)

    return plot, state
|
51 |
+
|
52 |
+
# Create Gradio interface and launch
|
53 |
+
# Build the live-updating Gradio interface: streaming microphone in,
# rolling wake-word score plot out, with session state threaded through.
mic_input = gr.Audio(source="microphone", type="numpy", streaming=True, show_label=False)
score_plot = gr.LinePlot(show_label=False)

gr_int = gr.Interface(
    fn=process_audio,
    css=".flex {flex-direction: column} .gr-panel {width: 100%}",
    inputs=[mic_input, "state"],
    outputs=[score_plot, "state"],
    live=True,
)

gr_int.launch()
|
requirements.txt
ADDED
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
1 |
+
openwakeword>=0.1.0,<1
gradio==3.15.0
scipy
pandas
numpy
|