Prgckwb committed on
Commit bb1c525
Parent: 941ac0f

:tada: init

Files changed (1)
  1. app.py +52 -25
app.py CHANGED
@@ -1,8 +1,15 @@
 import gradio as gr
 import spaces
 import torch
-from diffusers import DiffusionPipeline
 from PIL import Image
+from diffusers import DiffusionPipeline
+
+MODEL_CHOICES = [
+    "stabilityai/stable-diffusion-3-medium-diffusers",
+    "stabilityai/stable-diffusion-xl-base-1.0",
+    "stabilityai/stable-diffusion-2-1",
+    "runwayml/stable-diffusion-v1-5",
+]
 
 # Global Variables
 current_model_id = "stabilityai/stable-diffusion-3-medium-diffusers"
@@ -16,10 +23,10 @@ pipe = DiffusionPipeline.from_pretrained(
 @spaces.GPU()
 @torch.inference_mode()
 def inference(
-    model_id: str,
-    prompt: str,
-    negative_prompt: str = "",
-    progress=gr.Progress(track_tqdm=True),
+    model_id: str,
+    prompt: str,
+    negative_prompt: str = "",
+    progress=gr.Progress(track_tqdm=True),
 ) -> Image.Image:
     global current_model_id, pipe
 
@@ -42,24 +49,44 @@ def inference(
 
 
 if __name__ == "__main__":
-    demo = gr.Interface(
-        fn=inference,
-        inputs=[
-            gr.Dropdown(
-                label="Model ID",
-                choices=[
-                    "stabilityai/stable-diffusion-3-medium-diffusers",
-                    "stabilityai/stable-diffusion-xl-base-1.0",
-                    "stabilityai/stable-diffusion-2-1",
-                    "runwayml/stable-diffusion-v1-5",
-                ],
-                value="stabilityai/stable-diffusion-3-medium-diffusers",
-            ),
-            gr.Text(label="Prompt", value=""),
-            gr.Text(label="Negative Prompt", value=""),
-        ],
-        outputs=[
-            gr.Image(label="Image", type="pil"),
-        ],
-    )
+    with gr.Blocks() as demo:
+        gr.Markdown(f"# Stable Diffusion Demo")
+
+        with gr.Row():
+            with gr.Column():
+                inputs = [
+                    gr.Dropdown(
+                        label="Model ID",
+                        choices=MODEL_CHOICES,
+                        value="stabilityai/stable-diffusion-3-medium-diffusers",
+                    ),
+                    gr.Text(label="Prompt", value=""),
+                    gr.Text(label="Negative Prompt", value=""),
+                ]
+
+                with gr.Accordion("Additional Settings (W.I.P)", open=False):
+                    additional_inputs = [
+                        gr.Text(
+                            label="Model URL",
+                            lines=2,
+                            placeholder="e.g. ) https://civitai.com/api/download/models/177164?type=Model&format=SafeTensor&size=full&fp=fp16"
+                        ),
+                        gr.Number(label="Num Inference Steps", value=None, minimum=1, maximum=1000, step=1)
+                    ]
+
+            with gr.Column():
+                outputs = [
+                    gr.Image(label="Image", type="pil"),
+                ]
+
+        gr.Examples(
+            examples=[
+                ["stabilityai/stable-diffusion-3-medium-diffusers", "A cat holding a sign that says Hello world", ""]
+            ],
+            inputs=inputs
+        )
+
+        btn = gr.Button("Generate")
+        btn.click(fn=inference, inputs=inputs, outputs=outputs)
+
     demo.queue().launch()
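Note: the body of inference is not shown in this diff. Based on the visible signature, the module-level current_model_id and pipe globals, and the pipe = DiffusionPipeline.from_pretrained( context in the second hunk header, it presumably reloads the pipeline when the selected model changes and then runs it on the prompt. The sketch below is an assumption for orientation only, not the committed code; it reuses the imports and globals already defined in app.py, and the device handling and call arguments are guesses.

@spaces.GPU()
@torch.inference_mode()
def inference(
    model_id: str,
    prompt: str,
    negative_prompt: str = "",
    progress=gr.Progress(track_tqdm=True),
) -> Image.Image:
    global current_model_id, pipe

    # Assumed behaviour: reload the pipeline only when the dropdown selection changes.
    if model_id != current_model_id:
        pipe = DiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
        current_model_id = model_id

    # @spaces.GPU() grants a GPU for the duration of this call on ZeroGPU Spaces.
    pipe = pipe.to("cuda")

    # track_tqdm=True lets gr.Progress mirror the pipeline's internal tqdm progress bar.
    result = pipe(prompt=prompt, negative_prompt=negative_prompt)
    return result.images[0]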
 
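In the new Blocks layout, btn.click(fn=inference, inputs=inputs, outputs=outputs) maps the Dropdown, Prompt, and Negative Prompt components to model_id, prompt, and negative_prompt; the progress argument is injected by Gradio rather than by a component. The additional_inputs built inside the "Additional Settings (W.I.P)" accordion are not passed to the click event, so they have no effect in this commit. If they are wired up later, both the event registration and the function signature would need to change, roughly as in the hypothetical sketch below (the model_url and num_inference_steps parameter names are illustrative, not from the commit).

# Hypothetical follow-up, not part of this commit: pass the W.I.P fields through as well.
btn.click(
    fn=inference,
    inputs=inputs + additional_inputs,  # Model ID, Prompt, Negative Prompt, Model URL, Num Inference Steps
    outputs=outputs,
)

# inference would then need matching parameters, e.g.:
# def inference(model_id, prompt, negative_prompt, model_url, num_inference_steps,
#               progress=gr.Progress(track_tqdm=True)) -> Image.Image:
#     ...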