import gradio as gr
import spaces
import torch
from PIL import Image
from diffusers import DiffusionPipeline

MODEL_CHOICES = [
    "stabilityai/stable-diffusion-3-medium-diffusers",
    "stabilityai/stable-diffusion-xl-base-1.0",
    "stabilityai/stable-diffusion-2-1",
    "runwayml/stable-diffusion-v1-5",
]

# Global variables: currently loaded model ID and its pipeline
current_model_id = "stabilityai/stable-diffusion-3-medium-diffusers"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
pipe = DiffusionPipeline.from_pretrained(
    current_model_id,
    torch_dtype=torch.float16,
).to(device)


@spaces.GPU()
@torch.inference_mode()
def inference(
    model_id: str,
    prompt: str,
    negative_prompt: str = "",
    progress=gr.Progress(track_tqdm=True),
) -> Image.Image:
    global current_model_id, pipe

    # Reload the pipeline only when a different model is selected
    if model_id != current_model_id:
        try:
            pipe = DiffusionPipeline.from_pretrained(
                model_id,
                torch_dtype=torch.float16,
            ).to(device)
            current_model_id = model_id
        except Exception as e:
            raise gr.Error(str(e))

    image = pipe(
        prompt,
        negative_prompt=negative_prompt,
    ).images[0]

    return image


if __name__ == "__main__":
    with gr.Blocks() as demo:
        gr.Markdown("# Stable Diffusion Demo")

        with gr.Row():
            with gr.Column():
                inputs = [
                    gr.Dropdown(
                        label="Model ID",
                        choices=MODEL_CHOICES,
                        value="stabilityai/stable-diffusion-3-medium-diffusers",
                    ),
                    gr.Text(label="Prompt", value=""),
                    gr.Text(label="Negative Prompt", value=""),
                ]
                with gr.Accordion("Additional Settings (W.I.P)", open=False):
                    additional_inputs = [
                        gr.Text(
                            label="Model URL",
                            lines=2,
                            placeholder="e.g. https://civitai.com/api/download/models/177164?type=Model&format=SafeTensor&size=full&fp=fp16",
                        ),
                        gr.Number(
                            label="Num Inference Steps",
                            value=None,
                            minimum=1,
                            maximum=1000,
                            step=1,
                        ),
                    ]

            with gr.Column():
                outputs = [
                    gr.Image(label="Image", type="pil"),
                ]

        gr.Examples(
            examples=[
                [
                    "stabilityai/stable-diffusion-3-medium-diffusers",
                    "A cat holding a sign that says Hello world",
                    "",
                ]
            ],
            inputs=inputs,
        )

        btn = gr.Button("Generate")
        btn.click(fn=inference, inputs=inputs, outputs=outputs)

    demo.queue().launch()