from functools import lru_cache

import gradio as gr
import librosa
import numpy as np

from whisper_online import *


@lru_cache
def load_audio(fname):
    a, _ = librosa.load(fname, sr=16000, dtype=np.float32)
    return a


asr = FasterWhisperASR("en", "large-v2")  # loads and wraps Whisper model
online = OnlineASRProcessor(asr)  # create processing object with default buffer trimming option
# note: `online` is stateful -- it keeps the audio buffer and partial hypothesis across calls


def transcribe(audio_file):
    try:
        audio_data = load_audio(audio_file)
        # Insert audio chunk into the ASR processor
        online.insert_audio_chunk(audio_data)
        # Process the audio chunk
        o = online.process_iter()
        # Send current partial output
        return o
    except Exception as e:
        return str(e)


def finish_stream():
    try:
        # Finish audio processing
        o = online.finish()
        # Send last output
        return o
    except Exception as e:
        return str(e)


# the audio component hands the uploaded/recorded file path to transcribe()
iface = gr.Interface(fn=transcribe, inputs=gr.Audio(type="filepath"), outputs="text")
iface.launch()
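
# finish_stream() is defined above but never wired into the interface.
# A possible way to expose it alongside transcribe() -- a sketch only, assuming
# the gr.Blocks / event API, not part of the demo launched above:
#
#   with gr.Blocks() as demo:
#       audio = gr.Audio(type="filepath")
#       out = gr.Textbox()
#       audio.change(transcribe, inputs=audio, outputs=out)
#       gr.Button("Finish").click(finish_stream, outputs=out)
#   demo.launch()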