yonishafir committed
Commit 971d02a
1 Parent(s): d81420a

Update app.py

Files changed (1):
  1. app.py +9 -7
app.py CHANGED

@@ -91,7 +91,7 @@ pipe.fuse_lora()
 pipe = pipe.to(device)
 # pipe.enable_xformers_memory_efficient_attention()
 
-generator = torch.Generator(device='cuda').manual_seed(123456)
+# generator = torch.Generator(device='cuda').manual_seed(123456)
 
 vae = pipe.vae
 
@@ -105,7 +105,7 @@ def read_content(file_path: str) -> str:
 
     return content
 
-def predict(dict, prompt="", negative_prompt = default_negative_prompt, guidance_scale=1.2, steps=12, strength=1.0):
+def predict(dict, prompt="", negative_prompt = default_negative_prompt, guidance_scale=1.2, steps=12, strength=1.0, seed=123456):
     if negative_prompt == "":
         negative_prompt = None
 
@@ -142,7 +142,8 @@ def predict(dict, prompt="", negative_prompt = default_negative_prompt, guidance
     # mask_resized = mask_resized.to(torch.float16)
     masked_image = torch.cat([control_latents, mask_resized], dim=1)
 
-
+    generator = torch.Generator(device='cuda').manual_seed(123456)
+
     output = pipe(prompt = prompt,
                   width=width,
                   height=height,
@@ -218,9 +219,10 @@ with image_blocks as demo:
 
     with gr.Accordion(label="Advanced Settings", open=False):
         with gr.Row(equal_height=True):
-            guidance_scale = gr.Number(value=1.2, minimum=1.0, maximum=2, step=0.1, label="guidance_scale")
-            steps = gr.Number(value=12, minimum=8, maximum=30, step=1, label="steps")
-            strength = gr.Number(value=1, minimum=0.01, maximum=1.0, step=0.01, label="strength")
+            guidance_scale = gr.Number(value=1.2, minimum=0.8, maximum=2.5, step=0.1, label="guidance_scale")
+            steps = gr.Number(value=12, minimum=6, maximum=20, step=1, label="steps")
+            # strength = gr.Number(value=1, minimum=0.01, maximum=1.0, step=0.01, label="strength")
+            seed = gr.Number(value=123456, minimum=0, maximum=999999, step=1, label="seed")
         negative_prompt = gr.Textbox(label="negative_prompt", value=default_negative_prompt, placeholder=default_negative_prompt, info="what you don't want to see in the image")
 
 
@@ -229,7 +231,7 @@ with image_blocks as demo:
 
 
 
-    btn.click(fn=predict, inputs=[image, prompt, negative_prompt, guidance_scale, steps, strength], outputs=[image_out], api_name='run')
+    btn.click(fn=predict, inputs=[image, prompt, negative_prompt, guidance_scale, steps, strength, seed], outputs=[image_out], api_name='run')
     prompt.submit(fn=predict, inputs=[image, prompt, negative_prompt, guidance_scale, steps, strength], outputs=[image_out])
 
     gr.HTML(
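
For reference, the commit moves seeding out of a module-level generator and into the request path: predict gains a seed parameter, the torch.Generator is created inside the handler, and a seed gr.Number is added to the Advanced Settings and to btn.click's inputs. The sketch below shows that wiring pattern in isolation, assuming only gradio and torch; the run handler, demo layout, and the placeholder return value are illustrative stand-ins, not the app's actual predict or pipeline call.

```python
# Minimal sketch (hypothetical names: run, demo, out) of threading a UI seed
# into a per-call torch.Generator, mirroring the wiring this commit sets up.
import gradio as gr
import torch

def run(prompt, seed):
    # Build the generator inside the handler so every request is seeded
    # independently of any previous one.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    generator = torch.Generator(device=device).manual_seed(int(seed))
    # Placeholder for the real diffusers pipeline call, e.g.:
    #   image = pipe(prompt=prompt, generator=generator, ...).images[0]
    return f"would generate '{prompt}' with seed {int(seed)}"

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="prompt")
    seed = gr.Number(value=123456, minimum=0, maximum=999999, step=1, label="seed")
    out = gr.Textbox(label="result")
    btn = gr.Button("Generate")
    # The seed component must be listed in `inputs` for its value to reach the handler.
    btn.click(fn=run, inputs=[prompt, seed], outputs=[out])

if __name__ == "__main__":
    demo.launch()
```

Creating the generator per call keeps each request reproducible for a given seed, whereas a shared module-level generator advances its state across requests and makes results drift between runs.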