Aditibaheti committed on
Commit
e2ebf4e
1 Parent(s): d353d19

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -10
app.py CHANGED
@@ -23,8 +23,11 @@ pipeline = DiffusionPipeline.from_pretrained(
23
  use_auth_token=HUGGINGFACE_TOKEN
24
  )
25
  pipeline.load_lora_weights(lora_weights_path)
26
- #pipeline.enable_sequential_cpu_offload() # Efficient memory usage
27
- #pipeline.enable_xformers_memory_efficient_attention() # Enable xformers memory efficient attention
 
 
 
28
  pipeline = pipeline.to(device)
29
 
30
  MAX_SEED = np.iinfo(np.int32).max
@@ -138,17 +141,13 @@ with gr.Blocks(css=css) as demo:
138
  value=30,
139
  )
140
 
141
- gr.Examples(
142
- examples=examples,
143
- inputs=[prompt],
144
- fn=infer,
145
- outputs=[result]
146
- )
147
-
148
  run_button.click(
149
  fn=infer,
150
  inputs=[prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
151
  outputs=[result]
152
  )
153
 
154
- demo.queue().launch(share=True)
 
23
  use_auth_token=HUGGINGFACE_TOKEN
24
  )
25
  pipeline.load_lora_weights(lora_weights_path)
26
+
27
+ if device == "cuda":
28
+ pipeline.enable_sequential_cpu_offload() # Efficient memory usage
29
+ pipeline.enable_xformers_memory_efficient_attention() # Enable xformers memory efficient attention
30
+
31
  pipeline = pipeline.to(device)
32
 
33
  MAX_SEED = np.iinfo(np.int32).max
 
141
  value=30,
142
  )
143
 
144
+ for example in examples:
145
+ gr.Button(example).click(lambda e=example: prompt.set_value(e))
146
+
 
 
 
 
147
  run_button.click(
148
  fn=infer,
149
  inputs=[prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
150
  outputs=[result]
151
  )
152
 
153
+ demo.queue().launch()