import spaces
from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor, pipeline
from transformers.utils import is_flash_attn_2_available, is_torch_sdpa_available
from transformers.pipelines.audio_utils import ffmpeg_read
import torch
import gradio as gr
import time
BATCH_SIZE = 16
MAX_AUDIO_MINS = 30 # maximum audio input in minutes
device = "cuda:0" if torch.cuda.is_available() else "cpu"
torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
# Use Flash Attention 2 when installed, then PyTorch SDPA, then eager attention.
attn_implementation = (
    "flash_attention_2"
    if is_flash_attn_2_available()
    else "sdpa" if is_torch_sdpa_available() else "eager"
)
model = AutoModelForSpeechSeq2Seq.from_pretrained(
    "openai/whisper-large-v3",
    torch_dtype=torch_dtype,
    low_cpu_mem_usage=True,
    use_safetensors=True,
    attn_implementation=attn_implementation,
)
distilled_model = AutoModelForSpeechSeq2Seq.from_pretrained(
    "distil-whisper/distil-large-v3",
    torch_dtype=torch_dtype,
    low_cpu_mem_usage=True,
    use_safetensors=True,
    attn_implementation=attn_implementation,
)
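# distil-large-v3 shares Whisper's tokenizer and feature extractor, so a single
# processor can serve both pipelines.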
processor = AutoProcessor.from_pretrained("openai/whisper-large-v3")
model.to(device)
distilled_model.to(device)
pipe = pipeline(
"automatic-speech-recognition",
model=model,
tokenizer=processor.tokenizer,
feature_extractor=processor.feature_extractor,
max_new_tokens=128,
chunk_length_s=30,
torch_dtype=torch_dtype,
device=device,
generate_kwargs={"language": "en", "task": "transcribe"},
return_timestamps=True
)
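# Keep references to the original `_forward` methods (here and for the distilled
# pipeline below); transcribe() wraps them with timers so that only the model
# forward passes are measured, excluding pre- and post-processing.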
pipe_forward = pipe._forward
distil_pipe = pipeline(
"automatic-speech-recognition",
model=distilled_model,
tokenizer=processor.tokenizer,
feature_extractor=processor.feature_extractor,
max_new_tokens=128,
chunk_length_s=25,
torch_dtype=torch_dtype,
device=device,
generate_kwargs={"language": "en", "task": "transcribe"},
)
distil_pipe_forward = distil_pipe._forward
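# On ZeroGPU Spaces, the `spaces.GPU` decorator requests a GPU for the duration
# of each call to the decorated function.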
@spaces.GPU
def transcribe(inputs):
    if inputs is None:
        raise gr.Error("No audio file submitted! Please record or upload an audio file before submitting your request.")

    # Decode the uploaded file to a waveform at the model's sampling rate.
    with open(inputs, "rb") as f:
        inputs = f.read()

    inputs = ffmpeg_read(inputs, pipe.feature_extractor.sampling_rate)
    audio_length_mins = len(inputs) / pipe.feature_extractor.sampling_rate / 60

    if audio_length_mins > MAX_AUDIO_MINS:
        raise gr.Error(
            f"To ensure fair usage of the Space, the maximum audio length permitted is {MAX_AUDIO_MINS} minutes. "
            f"Got an audio of length {round(audio_length_mins, 3)} minutes."
        )

    inputs = {"array": inputs, "sampling_rate": pipe.feature_extractor.sampling_rate}

    def _forward_distil_time(*args, **kwargs):
        # Time the distilled model's forward pass only.
        global distil_runtime
        start_time = time.time()
        result = distil_pipe_forward(*args, **kwargs)
        distil_runtime = round(time.time() - start_time, 2)
        return result

    distil_pipe._forward = _forward_distil_time
    # Pass a copy: the pipeline pops keys from the input dict, which is reused below.
    distil_text = distil_pipe(inputs.copy(), batch_size=BATCH_SIZE)["text"]
    # Yield the Distil-Whisper result first so it is displayed while Whisper still runs.
    yield distil_text, distil_runtime, None, None

    def _forward_time(*args, **kwargs):
        # Time the original model's forward pass only.
        global runtime
        start_time = time.time()
        result = pipe_forward(*args, **kwargs)
        runtime = round(time.time() - start_time, 2)
        return result

    pipe._forward = _forward_time
    text = pipe(inputs, batch_size=BATCH_SIZE)["text"]
    yield distil_text, distil_runtime, text, runtime
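# Minimal standalone sketch of the same chunked pipeline outside Gradio, assuming
# a local `audio.wav` file (hypothetical path):
#
#   out = distil_pipe("audio.wav", batch_size=BATCH_SIZE)
#   print(out["text"])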
if __name__ == "__main__":
    with gr.Blocks() as demo:
        gr.HTML(
            """
            <div style="text-align: center; max-width: 700px; margin: 0 auto;">
              <div
                style="
                  display: inline-flex; align-items: center; gap: 0.8rem; font-size: 1.75rem;
                "
              >
                <h1 style="font-weight: 900; margin-bottom: 7px; line-height: normal;">
                  Whisper vs Distil-Whisper: Speed Comparison
                </h1>
              </div>
            </div>
            """
        )
        gr.HTML(
            f"""
            <p><a href="https://huggingface.co/distil-whisper/distil-large-v3"> Distil-Whisper</a> is a distilled variant
            of the <a href="https://huggingface.co/openai/whisper-large-v3"> Whisper</a> model by OpenAI. Compared to Whisper,
            Distil-Whisper runs 6x faster with 50% fewer parameters, while performing to within 1% word error rate (WER) on
            out-of-distribution evaluation data.</p>
            <p>In this demo, we perform a speed comparison between Whisper and Distil-Whisper in order to test this claim.
            Both models use the <a href="https://huggingface.co/distil-whisper/distil-large-v3#chunked-long-form"> chunked long-form transcription algorithm</a>
            in 🤗 Transformers. To use Distil-Whisper yourself, check the code examples on the
            <a href="https://github.com/huggingface/distil-whisper#1-usage"> Distil-Whisper repository</a>. To ensure fair
            usage of the Space, we ask that audio file inputs are kept to < {MAX_AUDIO_MINS} mins.</p>
            """
        )
        audio = gr.components.Audio(type="filepath", label="Audio input")
        button = gr.Button("Transcribe")

        with gr.Row():
            distil_runtime = gr.components.Textbox(label="Distil-Whisper Transcription Time (s)")
            runtime = gr.components.Textbox(label="Whisper Transcription Time (s)")

        with gr.Row():
            distil_transcription = gr.components.Textbox(label="Distil-Whisper Transcription", show_copy_button=True)
            transcription = gr.components.Textbox(label="Whisper Transcription", show_copy_button=True)

        button.click(
            fn=transcribe,
            inputs=audio,
            outputs=[distil_transcription, distil_runtime, transcription, runtime],
        )

        gr.Markdown("## Examples")
        gr.Examples(
            [["./assets/example_1.wav"], ["./assets/example_2.wav"]],
            audio,
            outputs=[distil_transcription, distil_runtime, transcription, runtime],
            fn=transcribe,
            cache_examples=False,
        )

    demo.queue(max_size=10).launch()