fountai committed
Commit d34a4b1
1 Parent(s): 81d55cf
Files changed (4)
  1. README.md +3 -2
  2. app.py +178 -0
  3. flux +1 -0
  4. requirements.txt +15 -0
README.md CHANGED
@@ -1,12 +1,13 @@
 ---
-title: No Name
-emoji: 📊
+title: Flux Advanced Explorer
+emoji: 🦄
 colorFrom: blue
 colorTo: yellow
 sdk: gradio
 sdk_version: 4.42.0
 app_file: app.py
 pinned: false
+short_description: With IP Adapter
 ---
 
 Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,178 @@
+import gradio as gr
+from PIL import Image
+import os
+from flux.src.flux.xflux_pipeline import XFluxPipeline
+import random
+import spaces
+
+def run_xflux_pipeline(
+    prompt, image, repo_id, name, device,
+    model_type, width, height, timestep_to_start_cfg, num_steps, true_gs, guidance,
+    neg_prompt="",
+    negative_image=None,
+    save_path='results', control_type='depth', use_controlnet=False, seed=None, num_images_per_prompt=1, use_lora=False, lora_weight=0.7, lora_repo_id="XLabs-AI/flux-lora-collection", lora_name="realism_lora.safetensors", use_ip=False
+):
+    # Build the arguments, mimicking the command-line interface
+    class Args:
+        def __init__(self):
+            self.prompt = prompt
+            self.image = image
+            self.control_type = control_type
+            self.repo_id = repo_id
+            self.name = name
+            self.device = device
+            self.use_controlnet = use_controlnet
+            self.model_type = model_type
+            self.width = width
+            self.height = height
+            self.timestep_to_start_cfg = timestep_to_start_cfg
+            self.num_steps = num_steps
+            self.true_gs = true_gs
+            self.guidance = guidance
+            self.num_images_per_prompt = num_images_per_prompt
+            self.seed = seed if seed else 123456789
+            self.neg_prompt = neg_prompt
+            self.img_prompt = Image.open(image) if image else None
+            self.neg_img_prompt = Image.open(negative_image) if negative_image else None
+            self.ip_scale = 1.0
+            self.neg_ip_scale = 1.0
+            self.local_path = None
+            self.ip_repo_id = "XLabs-AI/flux-ip-adapter"
+            self.ip_name = "flux-ip-adapter.safetensors"
+            self.ip_local_path = None
+            self.lora_repo_id = lora_repo_id
+            self.lora_name = lora_name
+            self.lora_local_path = None
+            self.offload = False
+            self.use_ip = use_ip
+            self.use_lora = use_lora
+            self.lora_weight = lora_weight
+            self.save_path = save_path
+
+    args = Args()
+
+    # Load the control image if one was provided
+    if args.image:
+        image = Image.open(args.image)
+    else:
+        image = None
+
+    # Initialize the pipeline with the required parameters
+    xflux_pipeline = XFluxPipeline(args.model_type, args.device, args.offload)
+
+    # Configure ControlNet if requested
+    if args.use_controlnet:
+        print('Loading ControlNet:', args.local_path, args.repo_id, args.name)
+        xflux_pipeline.set_controlnet(args.control_type, args.local_path, args.repo_id, args.name)
+    if args.use_ip:
+        print('load ip-adapter:', args.ip_local_path, args.ip_repo_id, args.ip_name)
+        xflux_pipeline.set_ip(args.ip_local_path, args.ip_repo_id, args.ip_name)
+    if args.use_lora:
+        print('load lora:', args.lora_local_path, args.lora_repo_id, args.lora_name)
+        xflux_pipeline.set_lora(args.lora_local_path, args.lora_repo_id, args.lora_name, args.lora_weight)
+
+    # Generation loop
+    images = []
+    for _ in range(args.num_images_per_prompt):
+        seed = random.randint(0, 2147483647)
+        result = xflux_pipeline(
+            prompt=args.prompt,
+            controlnet_image=image,
+            width=args.width,
+            height=args.height,
+            guidance=args.guidance,
+            num_steps=args.num_steps,
+            seed=seed,
+            true_gs=args.true_gs,
+            neg_prompt=args.neg_prompt,
+            timestep_to_start_cfg=args.timestep_to_start_cfg,
+            image_prompt=args.img_prompt,
+            neg_image_prompt=args.neg_img_prompt,
+            ip_scale=args.ip_scale,
+            neg_ip_scale=args.neg_ip_scale,
+        )
+        images.append(result)
+
+    return images
+
+@spaces.GPU(duration=500)
+def process_image(image, prompt, steps, use_lora, use_controlnet, use_depth, use_hed, use_ip, lora_name, lora_path, lora_weight, negative_image, neg_prompt, true_gs, guidance, cfg):
+    return run_xflux_pipeline(
+        prompt=prompt,
+        neg_prompt=neg_prompt,
+        image=image,
+        negative_image=negative_image,
+        lora_name=lora_name,
+        lora_weight=lora_weight,
+        lora_repo_id=lora_path,
+        control_type="depth" if use_depth else "hed" if use_hed else "canny",
+        repo_id="XLabs-AI/flux-controlnet-collections",
+        name="flux-depth-controlnet.safetensors",
+        device="cuda",
+        use_controlnet=use_controlnet,
+        model_type="flux-dev",
+        width=1024,
+        height=1024,
+        timestep_to_start_cfg=cfg,
+        num_steps=steps,
+        num_images_per_prompt=4,
+        use_lora=use_lora,
+        true_gs=true_gs,
+        use_ip=use_ip,
+        guidance=guidance
+    )
+
+
+custom_css = """
+body {
+    background: rgb(24, 24, 27);
+}
+
+.gradio-container {
+    background: rgb(24, 24, 27);
+}
+
+.app-container {
+    background: rgb(24, 24, 27);
+}
+
+gradio-app {
+    background: rgb(24, 24, 27);
+}
+
+
+.sidebar {
+    background: rgb(31, 31, 35);
+    border-right: 1px solid rgb(41, 41, 41);
+}
+"""
+
+with gr.Blocks(css=custom_css) as demo:
+    with gr.Row(elem_classes="app-container"):
+        with gr.Column():
+            input_image = gr.Image(label="Image", type="filepath")
+            negative_image = gr.Image(label="Negative image", type="filepath")
+            submit_btn = gr.Button("Submit")
+
+        with gr.Column():
+            prompt = gr.Textbox(label="Prompt")
+            neg_prompt = gr.Textbox(label="Neg Prompt")
+            steps = gr.Slider(step=1, minimum=1, maximum=64, value=28, label="Num Steps")
+            use_lora = gr.Checkbox(label="Use LORA", value=True)
+            lora_path = gr.Textbox(label="LoraPath", value="XLabs-AI/flux-lora-collection")
+            lora_name = gr.Textbox(label="LoraName", value="realism_lora.safetensors")
+            lora_weight = gr.Slider(step=0.1, minimum=0, maximum=1, value=0.7, label="Lora Weight")
+            controlnet = gr.Checkbox(label="Use Controlnet (by default uses canny)", value=True)
+            use_ip = gr.Checkbox(label="Use IP")
+            use_depth = gr.Checkbox(label="Use depth")
+            use_hed = gr.Checkbox(label="Use hed")
+            true_gs = gr.Slider(step=0.1, minimum=0, maximum=10, value=3.5, label="TrueGs")
+            guidance = gr.Slider(minimum=1, maximum=10, value=4, label="Guidance")
+            cfg = gr.Slider(minimum=1, maximum=10, value=1, label="CFG")
+
+        with gr.Column():
+            output = gr.Gallery(label="Gallery output", elem_classes="gallery", selected_index=0)
+
+    submit_btn.click(process_image, inputs=[input_image, prompt, steps, use_lora, controlnet, use_depth, use_hed, use_ip, lora_name, lora_path, lora_weight, negative_image, neg_prompt, true_gs, guidance, cfg], outputs=output)
+
+demo.launch(share=True)
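As a usage note, here is a minimal sketch of calling `run_xflux_pipeline` from the file above directly, assuming the function is in scope without triggering `demo.launch` (for example, copied into its own module). The image path and prompt are placeholders; the repo and checkpoint names simply mirror the values hard-coded in `process_image`.

```python
# Hypothetical direct call to run_xflux_pipeline (defined in app.py above).
# "reference.png" and the prompt are placeholder values.
images = run_xflux_pipeline(
    prompt="a watercolor portrait of a red fox",  # placeholder prompt
    image="reference.png",                        # placeholder reference-image path
    repo_id="XLabs-AI/flux-controlnet-collections",
    name="flux-depth-controlnet.safetensors",
    device="cuda",
    model_type="flux-dev",
    width=1024,
    height=1024,
    timestep_to_start_cfg=1,
    num_steps=28,
    true_gs=3.5,
    guidance=4,
    use_controlnet=False,   # skip ControlNet; exercise only the IP-Adapter path
    use_ip=True,
    num_images_per_prompt=1,
)
first_result = images[0]  # one entry per generated image, as returned by XFluxPipeline
```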
flux ADDED
@@ -0,0 +1 @@
+Subproject commit 9e1dd391b2316b1cfc20e523e2885fd30134a2e4
requirements.txt ADDED
@@ -0,0 +1,15 @@
+accelerate==0.30.1
+deepspeed==0.14.4
+einops==0.8.0
+transformers==4.43.3
+huggingface-hub==0.24.5
+optimum-quanto
+datasets
+omegaconf
+diffusers
+sentencepiece
+opencv-python
+matplotlib
+onnxruntime
+torchvision
+timm