import soundfile as sf
import torch
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
import gradio as gr
import sox
import os

def convert(inputfile, outfile):
    # Resample the recorded audio to 16 kHz mono, 16-bit signed WAV with sox
    sox_tfm = sox.Transformer()
    sox_tfm.set_output_format(
        file_type="wav", channels=1, encoding="signed-integer", rate=16000, bits=16
    )
    sox_tfm.build(inputfile, outfile)

# Load the fine-tuned Dhivehi Wav2Vec2 model and its processor from the Hugging Face Hub
api_token = os.getenv("API_TOKEN")
model_name = "shahukareem/Wav2Vec2-Large-XLSR-53-Dhivehi"
processor = Wav2Vec2Processor.from_pretrained(model_name, use_auth_token=api_token)
model = Wav2Vec2ForCTC.from_pretrained(model_name, use_auth_token=api_token)

def parse_transcription(wav_file):
    # Convert the uploaded/recorded audio to the 16 kHz mono format the model expects
    filename = wav_file.name.split('.')[0]
    convert(wav_file.name, filename + "16k.wav")
    speech, _ = sf.read(filename + "16k.wav")
    # Run CTC inference and decode the most likely token ids into text
    input_values = processor(speech, sampling_rate=16_000, return_tensors="pt").input_values
    with torch.no_grad():
        logits = model(input_values).logits
    predicted_ids = torch.argmax(logits, dim=-1)
    transcription = processor.decode(predicted_ids[0], skip_special_tokens=True)
    return transcription

output = gr.outputs.Textbox(label="The transcript")
input_ = gr.inputs.Audio(source="microphone", type="file")
gr.Interface(parse_transcription, inputs=input_, outputs=[output],
             analytics_enabled=False,
             show_tips=False,
             theme='huggingface',
             layout='vertical',
             title="Speech Recognition for Dhivehi",
             description="Speech Recognition Live Demo for Dhivehi",
             enable_queue=True).launch(inline=False)