add step value slider
app.py CHANGED
@@ -5,12 +5,12 @@ import gradio as gr
 from audiodiffusion import AudioDiffusion
 
 
-def generate_spectrogram_audio_and_loop(audio_file,model_id):
+def generate_spectrogram_audio_and_loop(audio_file,steps,model_id):
     print(audio_file)
     print(model_id)
     audio_diffusion = AudioDiffusion(model_id=model_id)
     image, (sample_rate,
-            audio) = audio_diffusion.generate_spectrogram_and_audio_from_audio(audio_file)
+            audio) = audio_diffusion.generate_spectrogram_and_audio_from_audio(audio_file,steps)
     loop = AudioDiffusion.loop_it(audio, sample_rate)
     if loop is None:
         loop = audio
@@ -20,10 +20,11 @@ def generate_spectrogram_audio_and_loop(audio_file,model_id):
 demo = gr.Interface(fn=generate_spectrogram_audio_and_loop,
                     title="Audio Diffusion",
                     description="Forked from https://huggingface.co/spaces/teticio/audio-diffusion Built to style transfer to audio using Huggingface diffusers.\
-        Outputs a 5 second audio clip with elements from the initial audio uploaded. This takes about 2 hours without a GPU, so why not bake a cake in the meantime? (Or try the teticio/audio-diffusion-ddim-256 \
-        model which is faster.) The code for doing style transfer method was already
+        Outputs a 5 second audio clip with elements from the initial audio uploaded, steps is relative to the amount of style transfer from model to do. This takes about 2 hours without a GPU, so why not bake a cake in the meantime? (Or try the teticio/audio-diffusion-ddim-256 \
+        model which is faster.) The code for doing style transfer method was already in teticio's repo and python notebooks this is just my attempt to hook it up in the hugging face space. still need some more testing and such but would be cool to add more models, do inpainting, outpointing and get the api working with the updated pipelines",
                     inputs=[
                         gr.Audio(source="upload",type="filepath"),
+                        gr.Slider(minimum=0, maximum=1000, value=500, step=1, label="Steps counter between 0 and 1000, high means more style transfer from model"),
                         gr.Dropdown(label="Model",
                                     choices=[
                                         "teticio/audio-diffusion-256",
@@ -45,4 +46,4 @@ if __name__ == "__main__":
     parser.add_argument("--port", type=int)
     parser.add_argument("--server", type=int)
     args = parser.parse_args()
-    demo.launch(server_name=args.server or "0.0.0.0", server_port=args.port)
+    demo.launch(server_name=args.server or "0.0.0.0", server_port=args.port)
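For reference, here is a minimal sketch of the handler as it reads after this commit. It is not the Space's verified code: it assumes audiodiffusion's generate_spectrogram_and_audio_from_audio accepts steps as a keyword argument (the diff passes it positionally, which only works if steps is that method's second positional parameter), and the return statement is assumed since it sits outside the hunks shown above.

# Sketch only: the keyword call and the return shape are assumptions.
import gradio as gr
from audiodiffusion import AudioDiffusion

def generate_spectrogram_audio_and_loop(audio_file, steps, model_id):
    audio_diffusion = AudioDiffusion(model_id=model_id)
    # Pass steps by keyword so the call does not depend on the library's
    # positional parameter order (the committed code passes it positionally).
    image, (sample_rate, audio) = \
        audio_diffusion.generate_spectrogram_and_audio_from_audio(
            audio_file=audio_file, steps=steps)
    loop = AudioDiffusion.loop_it(audio, sample_rate)
    if loop is None:
        loop = audio  # no seamless loop found; fall back to the raw clip
    return image, (sample_rate, audio), (sample_rate, loop)

One detail worth noting about the diff itself: gr.Interface binds its inputs list to the function's parameters in order, which is why the new gr.Slider is inserted between the gr.Audio and gr.Dropdown components, matching the new (audio_file, steps, model_id) signature. Appending the slider after the dropdown instead would have fed the model id into steps.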