abidlabs HF staff committed on
Commit
49524e0
1 Parent(s): fcd207b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +15 -17
app.py CHANGED
@@ -1,23 +1,21 @@
1
- import numpy as np
2
  import gradio as gr
3
 
4
# Chromatic scale note names; list index is the semitone offset from C (index 9 == A).
notes = ["C", "C#", "D", "D#", "E", "F", "F#", "G", "G#", "A", "A#", "B"]
5
 
6
def generate_tone(note, octave, duration):
    """Synthesize a pure sine tone as 16-bit PCM audio.

    Parameters
    ----------
    note : int
        Index into the chromatic scale (0 == C ... 11 == B); 9 is A.
    octave : int
        Octave number; A4 (note 9, octave 4) is tuned to 440 Hz.
    duration : int or str
        Tone length in seconds; coerced with ``int()`` because the Gradio
        Textbox delivers it as text.

    Returns
    -------
    tuple[int, numpy.ndarray]
        ``(sample_rate, samples)`` — the pair a Gradio "audio" output expects.
    """
    sr = 48000  # sample rate in Hz
    # Equal temperament: each semitone away from A4 scales the frequency by 2**(1/12).
    a4_freq, tones_from_a4 = 440, 12 * (octave - 4) + (note - 9)
    frequency = a4_freq * 2 ** (tones_from_a4 / 12)
    duration = int(duration)
    # endpoint=False keeps the sample period at exactly 1/sr. With the default
    # endpoint=True the spacing would be duration/(N-1), slightly detuning the tone.
    t = np.linspace(0, duration, duration * sr, endpoint=False)
    # 20000 leaves headroom below the int16 maximum of 32767.
    audio = (20000 * np.sin(t * (2 * np.pi * frequency))).astype(np.int16)
    return (sr, audio)
 
14
 
15
# Wire generate_tone into a Gradio UI and start the local web server.
# NOTE(review): gr.inputs.* is the legacy (pre-3.x) Gradio component namespace;
# confirm the installed gradio version still ships it.
gr.Interface(
    generate_tone,
    [
        gr.inputs.Dropdown(notes, type="index"),  # type="index" passes the note's position, not its label
        gr.inputs.Slider(4, 6, step=1),  # octave selector
        gr.inputs.Textbox(type="number", default=1, label="Duration in seconds"),
    ],
    "audio",  # output rendered from the (sample_rate, samples) return value
).launch()
 
1
from transformers import pipeline
import gradio as gr

# Speech-to-text pipeline; with no checkpoint named, transformers picks its
# default ASR model (downloaded on first run — network required).
model = pipeline("automatic-speech-recognition")
5
 
6
def transcribe_audio(mic=None, file=None):
    """Transcribe speech from a microphone recording or an uploaded file.

    Parameters
    ----------
    mic : str or None
        Filepath of a microphone recording; takes precedence when both
        inputs are supplied.
    file : str or None
        Filepath of an uploaded audio file.

    Returns
    -------
    str
        The transcription text, or an instructional message when neither
        input was provided.
    """
    # Prefer the mic recording; fall back to the uploaded file.
    audio = mic if mic is not None else file
    if audio is None:
        return "You must either provide a mic recording or a file"
    return model(audio)["text"]
15
 
16
# Wire transcribe_audio into a Gradio UI and start the local web server.
# NOTE(review): gr.inputs.Audio with source=/optional= is the legacy (pre-3.x)
# Gradio API; confirm the installed gradio version still supports it.
gr.Interface(
    fn=transcribe_audio,
    inputs=[gr.inputs.Audio(source="microphone", type="filepath", optional=True),
            gr.inputs.Audio(source ="upload", type="filepath", optional=True)],
    outputs="text",  # transcription rendered as plain text
).launch()