File size: 2,791 Bytes
f901652
b024062
 
941ac0f
bb1c525
 
 
 
 
 
 
 
941ac0f
 
 
 
 
 
 
 
f901652
 
b024062
 
 
bb1c525
 
 
 
941ac0f
 
b024062
941ac0f
 
 
 
 
 
 
 
 
b024062
 
 
 
 
 
 
 
 
 
bb1c525
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
941ac0f
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
import gradio as gr
import spaces
import torch
from PIL import Image
from diffusers import DiffusionPipeline

# Model repository IDs selectable from the UI dropdown; the first entry is
# also the default loaded at startup.
MODEL_CHOICES = [
    "stabilityai/stable-diffusion-3-medium-diffusers",
    "stabilityai/stable-diffusion-xl-base-1.0",
    "stabilityai/stable-diffusion-2-1",
    "runwayml/stable-diffusion-v1-5",
]

# Global Variables
# Tracks which model the global `pipe` currently holds so `inference` can
# skip reloading when the same model is requested again.
current_model_id = "stabilityai/stable-diffusion-3-medium-diffusers"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Eagerly load the default pipeline at import time in fp16 (downloads the
# weights on first run; may take a while).
pipe = DiffusionPipeline.from_pretrained(
    current_model_id,
    torch_dtype=torch.float16,
).to(device)


@spaces.GPU()
@torch.inference_mode()
def inference(
        model_id: str,
        prompt: str,
        negative_prompt: str = "",
        progress=gr.Progress(track_tqdm=True),
) -> Image.Image:
    """Generate an image from *prompt* using the requested diffusion model.

    Lazily swaps the global pipeline when *model_id* differs from the model
    currently loaded; otherwise the cached pipeline is reused.

    Args:
        model_id: Hugging Face repository ID of the diffusion model to use.
        prompt: Text prompt describing the desired image.
        negative_prompt: Concepts to steer the generation away from.
        progress: Gradio progress tracker mirroring the diffusers tqdm bar.

    Returns:
        The first generated image as a PIL ``Image``.

    Raises:
        gr.Error: If loading the requested model fails.
    """
    global current_model_id, pipe

    if model_id != current_model_id:
        try:
            # Build and move the new pipeline first; the globals are only
            # updated on success, so a failed load leaves the previously
            # loaded model fully usable.
            pipe = DiffusionPipeline.from_pretrained(
                model_id,
                torch_dtype=torch.float16,
            ).to(device)
            current_model_id = model_id
        except Exception as e:
            # Chain the original exception (`from e`) so the underlying
            # cause is preserved in the traceback instead of being dropped.
            raise gr.Error(str(e)) from e

    image = pipe(
        prompt,
        negative_prompt=negative_prompt,
    ).images[0]

    return image


if __name__ == "__main__":
    with gr.Blocks() as demo:
        # Plain string literal: the original f-string had no placeholders.
        gr.Markdown("# Stable Diffusion Demo")

        with gr.Row():
            with gr.Column():
                # Inputs wired to `inference`, in positional order:
                # (model_id, prompt, negative_prompt).
                inputs = [
                    gr.Dropdown(
                        label="Model ID",
                        choices=MODEL_CHOICES,
                        value="stabilityai/stable-diffusion-3-medium-diffusers",
                    ),
                    gr.Text(label="Prompt", value=""),
                    gr.Text(label="Negative Prompt", value=""),
                ]

                # NOTE(review): these controls are rendered but not passed to
                # the click handler below — presumably intentional W.I.P.
                with gr.Accordion("Additional Settings (W.I.P)", open=False):
                    additional_inputs = [
                        gr.Text(
                            label="Model URL",
                            lines=2,
                            placeholder="e.g. ) https://civitai.com/api/download/models/177164?type=Model&format=SafeTensor&size=full&fp=fp16"
                        ),
                        gr.Number(label="Num Inference Steps", value=None, minimum=1, maximum=1000, step=1)
                    ]

            with gr.Column():
                outputs = [
                    gr.Image(label="Image", type="pil"),
                ]

        # Clickable example row; values fill `inputs` in the same order.
        gr.Examples(
            examples=[
                ["stabilityai/stable-diffusion-3-medium-diffusers", "A cat holding a sign that says Hello world", ""]
            ],
            inputs=inputs
        )

        btn = gr.Button("Generate")
        btn.click(fn=inference, inputs=inputs, outputs=outputs)

    # queue() serializes generation requests across concurrent users.
    demo.queue().launch()