el-el-san committed
Commit 1c4f2f2
1 Parent(s): 330fac6

Update app.py

Files changed (1)
app.py +9 -21
app.py CHANGED
@@ -1,4 +1,3 @@
-import spaces
 import gradio as gr
 import numpy as np
 import PIL.Image
@@ -8,22 +7,11 @@ from diffusers import ControlNetModel, StableDiffusionXLPipeline, AutoencoderKL
 from diffusers import DDIMScheduler, EulerAncestralDiscreteScheduler
 import cv2
 import torch
-
+import os

 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

-#vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
-
-#pipe = StableDiffusionXLPipeline.from_pretrained(
-# #"yodayo-ai/clandestine-xl-1.0",
-# torch_dtype=torch.float16,
-# use_safetensors=True,
-# custom_pipeline="lpw_stable_diffusion_xl",
-# add_watermarker=False #,
-# #variant="fp16"
-#)
 pipe = StableDiffusionXLPipeline.from_single_file(
-    #"https://huggingface.co/Laxhar/noob_sdxl_beta/noob_hercules4/fp16/checkpoint-e0_s10000.safetensors/checkpoint-e0_s10000.safetensors",
     "https://huggingface.co/Laxhar/noob_sdxl_beta/noob_hercules3/checkpoint/checkpoint-e2_s109089.safetensors/checkpoint-e2_s109089.safetensors",
     use_safetensors=True,
     torch_dtype=torch.float16,
@@ -34,7 +22,6 @@ pipe.to(device)
 MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = 1216

-
 @spaces.GPU
 def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps):

@@ -53,8 +40,11 @@ def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance
         generator=generator
     ).images[0]

-    return output_image
+    # Temporarily save the output as a PNG file
+    output_path = "output_image.png"
+    output_image.save(output_path, format="PNG")

+    return output_path  # return the file path

 css = """
 #col-container {
@@ -70,8 +60,6 @@ with gr.Blocks(css=css) as demo:
         Text-to-Image Demo
         using [Noob SDXL beta model](https://huggingface.co/Laxhar)
         """)
-        #yodayo-ai/clandestine-xl-1.0
-        #yodayo-ai/holodayo-xl-2.1
         with gr.Row():
             prompt = gr.Text(
                 label="Prompt",
@@ -83,7 +71,7 @@ with gr.Blocks(css=css) as demo:

             run_button = gr.Button("Run", scale=0)

-        result = gr.Image(label="Result", show_label=False, show_download_button=True)
+        result = gr.Image(label="Result", show_label=False, type="filepath")

         with gr.Accordion("Advanced Settings", open=False):

@@ -110,7 +98,7 @@ with gr.Blocks(css=css) as demo:
                     minimum=256,
                     maximum=MAX_IMAGE_SIZE,
                     step=32,
-                    value=1024,#832,
+                    value=1024,
                 )

                 height = gr.Slider(
@@ -118,7 +106,7 @@ with gr.Blocks(css=css) as demo:
                     minimum=256,
                     maximum=MAX_IMAGE_SIZE,
                     step=32,
-                    value=1024,#1216,
+                    value=1024,
                 )

             with gr.Row():
@@ -138,7 +126,7 @@ with gr.Blocks(css=css) as demo:
                     value=28,
                 )

-    run_button.click(#lambda x: None, inputs=None, outputs=result).then(
+    run_button.click(
         fn=infer,
         inputs=[prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
         outputs=[result]
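
The substance of this commit is the hand-off between infer() and the Result component: the function now saves the generated image to disk as a PNG and returns the file path, and gr.Image is declared with type="filepath" so Gradio serves that file. The sketch below shows just that wiring in isolation; the placeholder image built inside infer() stands in for the SDXL pipeline call, and everything except the output_image.png name and the type="filepath" setting is illustrative rather than taken from the Space.

import gradio as gr
import numpy as np
import PIL.Image


def infer(prompt):
    # Stand-in for the real diffusion call: build a solid placeholder image.
    array = np.full((256, 256, 3), 200, dtype=np.uint8)
    output_image = PIL.Image.fromarray(array)

    # Save temporarily as PNG and hand Gradio the path instead of the PIL object.
    output_path = "output_image.png"
    output_image.save(output_path, format="PNG")
    return output_path


with gr.Blocks() as demo:
    prompt = gr.Text(label="Prompt")
    run_button = gr.Button("Run")
    # type="filepath" makes the component load and display the returned file.
    result = gr.Image(label="Result", show_label=False, type="filepath")

    run_button.click(fn=infer, inputs=[prompt], outputs=[result])

demo.launch()

Serving the saved PNG directly means the image Gradio displays is exactly the file infer() wrote to disk.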
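
For the loading code this diff leaves untouched: diffusers' StableDiffusionXLPipeline.from_single_file() accepts a direct URL to a .safetensors checkpoint, which is how the Space pulls the Laxhar/noob_sdxl_beta weights. A minimal end-to-end sketch of that pattern follows, assuming a CUDA GPU as on the Space; only the checkpoint URL, steps=28, and the 1024x1024 defaults come from this diff, while the prompt, negative prompt, guidance scale, and seed are placeholder values.

import torch
from diffusers import StableDiffusionXLPipeline

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load the SDXL checkpoint straight from a single .safetensors file URL.
pipe = StableDiffusionXLPipeline.from_single_file(
    "https://huggingface.co/Laxhar/noob_sdxl_beta/noob_hercules3/checkpoint/checkpoint-e2_s109089.safetensors/checkpoint-e2_s109089.safetensors",
    use_safetensors=True,
    torch_dtype=torch.float16,
)
pipe.to(device)

# The call mirrors the Space's infer(): a seeded Generator, with size and
# step count matching the demo's slider defaults.
generator = torch.Generator(device=device).manual_seed(0)
image = pipe(
    prompt="1girl, solo, looking at viewer",          # placeholder prompt
    negative_prompt="lowres, bad anatomy, worst quality",  # placeholder
    width=1024,
    height=1024,
    guidance_scale=7.0,       # assumed value; not shown in this diff
    num_inference_steps=28,
    generator=generator,
).images[0]

image.save("output_image.png", format="PNG")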