# Hugging Face Space — "Running on Zero" (ZeroGPU): Stable Video Diffusion img2vid demo.
import os
from glob import glob

import gradio as gr
import spaces
import torch
from diffusers import DiffusionPipeline
from diffusers.utils import export_to_video
from PIL import Image
# Load the pre-trained image-to-video pipeline once at module import, so every
# Gradio request reuses the same weights instead of reloading them per call.
# NOTE(review): no torch_dtype/.to("cuda") is specified here — on a ZeroGPU
# Space the device/precision setup presumably happens elsewhere; confirm.
pipeline = DiffusionPipeline.from_pretrained("stabilityai/stable-video-diffusion-img2vid-xt")
# Define the Gradio interface.
# The lambda indirection is deliberate: generate_video is defined *below* this
# statement, so `fn=generate_video` would raise NameError at import time; the
# lambda defers the name lookup until the first request, by which point the
# whole module has been executed.
interface = gr.Interface(
    fn=lambda img: generate_video(img),
    inputs=gr.Image(type="pil"),       # input arrives as a PIL.Image
    outputs=gr.Video(),                # generate_video returns a filesystem path to an .mp4
    title="Stable Video Diffusion",
    description="Upload an image to generate a video",
    theme="soft",
)
def generate_video(image):
    """Generate a short video from an input image using the SVD pipeline.

    Args:
        image: A PIL Image object representing the conditioning frame.

    Returns:
        str: Path of the exported .mp4 video file under ``outputs/``.
    """
    # The img2vid pipeline output exposes its result on `.frames` (one list of
    # PIL frames per generated video), not `.images`; take the first video.
    # NOTE(review): original code read `.images`, which this pipeline's output
    # does not provide — confirmed against the diffusers SVD documentation.
    video_frames = pipeline(image=image, num_inference_steps=10).frames[0]

    # Write frames to a uniquely numbered mp4 under outputs/, counting existing
    # files so successive requests do not overwrite each other.
    os.makedirs("outputs", exist_ok=True)
    base_count = len(glob(os.path.join("outputs", "*.mp4")))
    video_path = os.path.join("outputs", f"{base_count:06d}.mp4")
    export_to_video(video_frames, video_path, fps=6)
    return video_path
# Launch the Gradio app (blocks, serving requests until the process exits).
interface.launch()