sagar007 committed
Commit 9234004
Parent: 7a38243

Update app.py

Files changed (1): app.py (+18 -66)
app.py CHANGED
@@ -1,40 +1,34 @@
-import spaces
 import gradio as gr
 import torch
-from PIL import Image
 from diffusers import DiffusionPipeline
-from transformers import AutoModelForCausalLM, AutoTokenizer
-import torch
 import random
+from huggingface_hub import login
+import os
+
+# Authenticate using the token stored in Hugging Face Spaces secrets
+if 'HF_TOKEN' in os.environ:
+    login(token=os.environ['HF_TOKEN'])
+else:
+    raise ValueError("HF_TOKEN not found in environment variables. Please add it to your Space's secrets.")
 
 # Initialize the base model and specific LoRA
-base_model = "art42me/flux-dev-fp8-quantized"
-pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=torch.bfloat16)
-pipe.to("cpu")
+base_model = "black-forest-labs/FLUX.1-dev"
+pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=torch.float32)
+pipe.to("gpu")
 
 lora_repo = "sagar007/sagar_flux"
-trigger_word = ""  # Leave trigger_word blank if not used.
+trigger_word = "sagar"
 pipe.load_lora_weights(lora_repo)
 
 MAX_SEED = 2**32-1
 
-
 def run_lora(prompt, cfg_scale, steps, randomize_seed, seed, width, height, lora_scale, progress=gr.Progress(track_tqdm=True)):
-    # Set random seed for reproducibility
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
-    generator = torch.Generator(device="cpu").manual_seed(seed)
-
-    # Update progress bar (0% at start)
-    progress(0, "Starting image generation...")
+    generator = torch.Generator(device="gpu").manual_seed(seed)
 
-    # Generate image with progress updates
-    for i in range(1, steps + 1):
-        # Simulate the processing step (in a real scenario, you would integrate this with your image generation process)
-        if i % (steps // 10) == 0:  # Update every 10% of the steps
-            progress(i / steps * 100, f"Processing step {i} of {steps}...")
+    progress(0, "Starting image generation (this may take a while on CPU)...")
 
-    # Generate image using the pipeline
     image = pipe(
         prompt=f"{prompt} {trigger_word}",
         num_inference_steps=steps,
@@ -42,56 +36,14 @@ def run_lora(prompt, cfg_scale, steps, randomize_seed, seed, width, height, lora
         width=width,
         height=height,
         generator=generator,
-        joint_attention_kwargs={"scale": lora_scale},
+        cross_attention_kwargs={"scale": lora_scale},
     ).images[0]
 
-    # Final update (100%)
     progress(100, "Completed!")
 
-    yield image, seed
-
-# Example cached image and settings
-example_image_path = "example0.webp"  # Replace with the actual path to the example image
-example_prompt = """A Jelita Sukawati speaker is captured mid-speech. She has long, dark brown hair that cascades over her shoulders, framing her radiant, smiling face. Her Latina features are highlighted by warm, sun-kissed skin and bright, expressive eyes. She gestures with her left hand, displaying a delicate ring on her pinky finger, as she speaks passionately.
-The woman is wearing a colorful, patterned dress with a green lanyard featuring multiple badges and logos hanging around her neck. The lanyard prominently displays the "CagliostroLab" text.
-Behind her, there is a blurred background with a white banner containing logos and text, indicating a professional or conference setting. The overall scene captures the energy and vibrancy of her presentation."""
-example_cfg_scale = 3.2
-example_steps = 32
-example_width = 1152
-example_height = 896
-example_seed = 3981632454
-example_lora_scale = 0.85
-
-def load_example():
-    # Load example image from file
-    example_image = Image.open(example_image_path)
-    return example_prompt, example_cfg_scale, example_steps, False, example_seed, example_width, example_height, example_lora_scale, example_image
-
-with gr.Blocks() as app:
-    gr.Markdown("# Flux RealismLora Image Generator")
-    with gr.Row():
-        with gr.Column(scale=3):
-            prompt = gr.TextArea(label="Prompt", placeholder="Type a prompt", lines=5)
-            generate_button = gr.Button("Generate")
-            cfg_scale = gr.Slider(label="CFG Scale", minimum=1, maximum=20, step=0.5, value=example_cfg_scale)
-            steps = gr.Slider(label="Steps", minimum=1, maximum=100, step=1, value=example_steps)
-            width = gr.Slider(label="Width", minimum=256, maximum=1536, step=64, value=example_width)
-            height = gr.Slider(label="Height", minimum=256, maximum=1536, step=64, value=example_height)
-            randomize_seed = gr.Checkbox(False, label="Randomize seed")
-            seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=example_seed)
-            lora_scale = gr.Slider(label="LoRA Scale", minimum=0, maximum=1, step=0.01, value=example_lora_scale)
-        with gr.Column(scale=1):
-            result = gr.Image(label="Generated Image")
-    gr.Markdown("Generate images using RealismLora and a text prompt.\n[[non-commercial license, Flux.1 Dev](https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.md)]")
+    return image, seed
 
-    # Automatically load example data and image when the interface is launched
-    app.load(load_example, inputs=[], outputs=[prompt, cfg_scale, steps, randomize_seed, seed, width, height, lora_scale, result])
-
-    generate_button.click(
-        run_lora,
-        inputs=[prompt, cfg_scale, steps, randomize_seed, seed, width, height, lora_scale],
-        outputs=[result, seed]
-    )
+# (Rest of the Gradio interface code remains the same)
 
-app.queue()
+# Launch the app
 app.launch()
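
As committed, the new app.py is unlikely to run as-is: "gpu" is not a valid PyTorch device string (both pipe.to("gpu") and torch.Generator(device="gpu") would raise an error; CUDA devices are addressed as "cuda"), the diffusers FLUX pipeline passes the LoRA scale through joint_attention_kwargs (as the removed line did) rather than the Stable Diffusion-style cross_attention_kwargs, and the gr.Blocks definition that bound the app name has been replaced by a placeholder comment, so the final app.launch() refers to an undefined variable. The sketch below is one way the setup could look; the CUDA-or-CPU fallback, the bfloat16-on-GPU dtype choice, and the reduced single-column UI are illustrative assumptions, not part of this commit.

# Sketch of a corrected setup (assumptions: CUDA-or-CPU fallback, bfloat16 on GPU,
# and a minimal single-column UI standing in for the removed Blocks layout).
import os
import random

import gradio as gr
import torch
from diffusers import DiffusionPipeline
from huggingface_hub import login

# Authenticate with the token stored in the Space's secrets.
if "HF_TOKEN" in os.environ:
    login(token=os.environ["HF_TOKEN"])
else:
    raise ValueError("HF_TOKEN not found in environment variables. Please add it to your Space's secrets.")

# "gpu" is not a valid torch device string; use "cuda" when a GPU is available.
device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.bfloat16 if device == "cuda" else torch.float32

base_model = "black-forest-labs/FLUX.1-dev"
pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=dtype)
pipe.to(device)

lora_repo = "sagar007/sagar_flux"
trigger_word = "sagar"
pipe.load_lora_weights(lora_repo)

MAX_SEED = 2**32 - 1

def run_lora(prompt, cfg_scale, steps, randomize_seed, seed, width, height, lora_scale,
             progress=gr.Progress(track_tqdm=True)):
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    generator = torch.Generator(device=device).manual_seed(seed)

    image = pipe(
        prompt=f"{prompt} {trigger_word}",
        num_inference_steps=steps,
        guidance_scale=cfg_scale,
        width=width,
        height=height,
        generator=generator,
        # FLUX takes the LoRA scale via the joint-attention kwargs,
        # as the pre-commit version of this file did.
        joint_attention_kwargs={"scale": lora_scale},
    ).images[0]
    return image, seed

# Minimal UI so that `app` exists before app.launch(); illustrative, not the original layout.
with gr.Blocks() as app:
    prompt = gr.TextArea(label="Prompt", placeholder="Type a prompt", lines=5)
    cfg_scale = gr.Slider(label="CFG Scale", minimum=1, maximum=20, step=0.5, value=3.2)
    steps = gr.Slider(label="Steps", minimum=1, maximum=100, step=1, value=32)
    width = gr.Slider(label="Width", minimum=256, maximum=1536, step=64, value=1024)
    height = gr.Slider(label="Height", minimum=256, maximum=1536, step=64, value=1024)
    randomize_seed = gr.Checkbox(False, label="Randomize seed")
    seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
    lora_scale = gr.Slider(label="LoRA Scale", minimum=0, maximum=1, step=0.01, value=0.85)
    generate_button = gr.Button("Generate")
    result = gr.Image(label="Generated Image")
    generate_button.click(
        run_lora,
        inputs=[prompt, cfg_scale, steps, randomize_seed, seed, width, height, lora_scale],
        outputs=[result, seed],
    )

app.launch()

On a CPU-only Space the fallback keeps the app importable, while on GPU hardware bfloat16 roughly halves the weight memory compared with float32, which matters for a model the size of FLUX.1-dev.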