import torch
import gradio as gr

from transformers import pipeline
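# Run inference on GPU 0 when CUDA is available, otherwise fall back to the CPU.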
device = 0 if torch.cuda.is_available() else "cpu"
MODEL_ID = "jvalero/wav2vec2-base-vinyl_condition"
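# Audio-classification pipeline built from the fine-tuned wav2vec2 checkpoint above.
# Unlike an ASR pipeline it classifies the whole clip at once, so no chunk_length_s is needed.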
pipe = pipeline(
    task="audio-classification",
    model=MODEL_ID,
    device=device,
)
def get_vinyl_condition(filepath):
    """Return the top predicted condition label for the uploaded recording."""
    # The pipeline returns a list of {"label", "score"} dicts sorted by score;
    # keep only the highest-scoring label.
    output = pipe(filepath)
    return output[0]["label"]
demo = gr.Blocks()
file_classify = gr.Interface(
    fn=get_vinyl_condition,
    inputs=[
        gr.Audio(sources=["upload"], label="Audio file", type="filepath"),
    ],
    outputs="label",
    title="Vinyl Condition Classifier",
    description=(
        "Get your vinyl's condition based on the Goldmine grading standard! The demo uses the"
        f" checkpoint [{MODEL_ID}](https://huggingface.co/{MODEL_ID}) and 🤗 Transformers to classify"
        " audio files of arbitrary length."
    ),
    examples=[
        ["./example.mp3"],
        ["./example1.mp3"],
    ],
    cache_examples=True,
    allow_flagging="never",
)
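# Wrap the interface in a single tab inside the Blocks app, then start the server.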
with demo:
    gr.TabbedInterface([file_classify], ["Get Vinyl Condition"])

demo.launch()
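# To try it locally (assuming example.mp3 / example1.mp3 sit next to this file):
#   python app.py   # serves the demo on http://127.0.0.1:7860 by default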