John6666 committed
Commit 669753f
1 Parent(s): 03400c5

Upload 3 files
Files changed (3):
  1. README.md +13 -13
  2. app.py +337 -336
  3. requirements.txt +12 -10
README.md CHANGED
@@ -1,13 +1,13 @@
- ---
- title: FLUX.1 [Inpainting with lora]
- emoji: 🎨
- colorFrom: yellow
- colorTo: pink
- sdk: gradio
- sdk_version: 4.40.0
- app_file: app.py
- pinned: false
- license: mit
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ ---
+ title: FLUX.1 [Inpainting with lora]
+ emoji: 🎨
+ colorFrom: yellow
+ colorTo: pink
+ sdk: gradio
+ sdk_version: 4.40.0
+ app_file: app.py
+ pinned: false
+ license: mit
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py CHANGED
@@ -1,336 +1,337 @@
- from typing import Tuple
-
- import requests
- import random
- import numpy as np
- import gradio as gr
- import spaces
- import torch
- from PIL import Image
- from diffusers import FluxInpaintPipeline
- from huggingface_hub import login
- import os
- import time
- from gradio_imageslider import ImageSlider
-
- from diffusers import FlowMatchEulerDiscreteScheduler, AutoencoderKL
- from diffusers.models.transformers.transformer_flux import FluxTransformer2DModel
- from transformers import CLIPTextModel, CLIPTokenizer,T5EncoderModel, T5TokenizerFast
- import requests
- from io import BytesIO
- import PIL.Image
- import requests
-
- MARKDOWN = """
- # FLUX.1 Inpainting with lora
- """
-
- MAX_SEED = np.iinfo(np.int32).max
- IMAGE_SIZE = 1024
- DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
- HF_TOKEN = HF
-
- #login(token=HF_TOKEN)
-
- bfl_repo="black-forest-labs/FLUX.1-dev"
-
- class calculateDuration:
-     def __init__(self, activity_name=""):
-         self.activity_name = activity_name
-
-     def __enter__(self):
-         self.start_time = time.time()
-         self.start_time_formatted = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(self.start_time))
-         print(f"Activity: {self.activity_name}, Start time: {self.start_time_formatted}")
-         return self
-
-     def __exit__(self, exc_type, exc_value, traceback):
-         self.end_time = time.time()
-         self.elapsed_time = self.end_time - self.start_time
-         self.end_time_formatted = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(self.end_time))
-
-         if self.activity_name:
-             print(f"Elapsed time for {self.activity_name}: {self.elapsed_time:.6f} seconds")
-         else:
-             print(f"Elapsed time: {self.elapsed_time:.6f} seconds")
-
-         print(f"Activity: {self.activity_name}, End time: {self.start_time_formatted}")
-
-
- def remove_background(image: Image.Image, threshold: int = 50) -> Image.Image:
-     image = image.convert("RGBA")
-     data = image.getdata()
-     new_data = []
-     for item in data:
-         avg = sum(item[:3]) / 3
-         if avg < threshold:
-             new_data.append((0, 0, 0, 0))
-         else:
-             new_data.append(item)
-
-     image.putdata(new_data)
-     return image
-
- # text_encoder = CLIPTextModel.from_pretrained(os.path.join(os.getcwd(), "flux_text_encoders/clip_l.safetensors"), torch_dtype=dtype)
- # tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14", torch_dtype=dtype)
- # text_encoder_2 = T5EncoderModel.from_pretrained(os.path.join(os.getcwd(), "flux_text_encoders/t5xxl_fp8_e4m3fn.safetensors"), torch_dtype=dtype)
- # tokenizer_2 = T5TokenizerFast.from_pretrained(bfl_repo, subfolder="tokenizer_2", torch_dtype=dtype)
- # vae = AutoencoderKL.from_pretrained(bfl_repo, subfolder="vae", torch_dtype=dtype)
- # transformer = FluxTransformer2DModel.from_pretrained(bfl_repo, subfolder="transformer", torch_dtype=dtype)
-
-
- pipe = FluxInpaintPipeline.from_pretrained(bfl_repo, torch_dtype=torch.bfloat16).to(DEVICE)
-
-
- def resize_image_dimensions(
-     original_resolution_wh: Tuple[int, int],
-     maximum_dimension: int = IMAGE_SIZE
- ) -> Tuple[int, int]:
-     width, height = original_resolution_wh
-
-     # if width <= maximum_dimension and height <= maximum_dimension:
-     #     width = width - (width % 32)
-     #     height = height - (height % 32)
-     #     return width, height
-
-     if width > height:
-         scaling_factor = maximum_dimension / width
-     else:
-         scaling_factor = maximum_dimension / height
-
-     new_width = int(width * scaling_factor)
-     new_height = int(height * scaling_factor)
-
-     new_width = new_width - (new_width % 32)
-     new_height = new_height - (new_height % 32)
-
-     return new_width, new_height
-
-
- @spaces.GPU(duration=100)
- def process(
-     input_image_editor: dict,
-     image_url: str,
-     mask_url: str,
-     blur_mask: bool,
-     blur_factor: int,
-     lora_path: str,
-     lora_weights: str,
-     lora_scale: float,
-     trigger_word: str,
-     input_text: str,
-     seed_slicer: int,
-     randomize_seed_checkbox: bool,
-     strength_slider: float,
-     num_inference_steps_slider: int,
-     progress=gr.Progress(track_tqdm=True)
- ):
-     if not input_text:
-         gr.Info("Please enter a text prompt.")
-         return None, None
-
-     # default image edtiro
-     image = input_image_editor['background']
-     mask = input_image_editor['layers'][0]
-
-     if image_url:
-         print("start to fetch image from url", image_url)
-         response = requests.get(image_url)
-         response.raise_for_status()
-         image = PIL.Image.open(BytesIO(response.content))
-         print("fetch image success")
-
-     if mask_url:
-         print("start to fetch mask from url", mask_url)
-         response = requests.get(mask_url)
-         response.raise_for_status()
-         mask = PIL.Image.open(BytesIO(response.content))
-         print("fetch mask success")
-
-     if not image:
-         gr.Info("Please upload an image.")
-         return None, None
-
-     if not mask:
-         gr.Info("Please draw a mask on the image.")
-         return None, None
-     if blur_mask:
-         mask = pipe.mask_processor.blur(mask, blur_factor=blur_factor)
-
-     with calculateDuration("resize image"):
-         width, height = resize_image_dimensions(original_resolution_wh=image.size)
-         resized_image = image.resize((width, height), Image.LANCZOS)
-         resized_mask = mask.resize((width, height), Image.LANCZOS)
-
-     with calculateDuration("load lora"):
-         print(lora_path, lora_weights)
-         pipe.load_lora_weights(lora_path, weight_name=lora_weights)
-
-     if randomize_seed_checkbox:
-         seed_slicer = random.randint(0, MAX_SEED)
-     generator = torch.Generator().manual_seed(seed_slicer)
-
-     with calculateDuration("run pipe"):
-         print(input_text, width, height, strength_slider, num_inference_steps_slider, lora_scale)
-         result = pipe(
-             prompt=f"{input_text} {trigger_word}",
-             image=resized_image,
-             mask_image=resized_mask,
-             width=width,
-             height=height,
-             strength=strength_slider,
-             generator=generator,
-             num_inference_steps=num_inference_steps_slider,
-             max_sequence_length=256,
-             joint_attention_kwargs={"scale": lora_scale},
-         ).images[0]
-
-     return [resized_image, result], resized_mask
-
-
- with gr.Blocks() as demo:
-     gr.Markdown(MARKDOWN)
-     with gr.Row():
-         with gr.Column():
-             input_image_editor_component = gr.ImageEditor(
-                 label='Image',
-                 type='pil',
-                 sources=["upload", "webcam"],
-                 image_mode='RGB',
-                 layers=False,
-                 brush=gr.Brush(colors=["#FFFFFF"], color_mode="fixed"))
-
-             image_url = gr.Textbox(
-                 label="image url",
-                 show_label=True,
-                 max_lines=1,
-                 placeholder="Enter your image url (Optional)",
-             )
-             mask_url = gr.Textbox(
-                 label="Mask image url",
-                 show_label=True,
-                 max_lines=1,
-                 placeholder="Enter your mask image url (Optional)",
-             )
-
-             with gr.Accordion("Prompt Settings", open=True):
-
-                 input_text_component = gr.Textbox(
-                     label="Inpaint prompt",
-                     show_label=True,
-                     max_lines=1,
-                     placeholder="Enter your prompt",
-                 )
-                 trigger_word = gr.Textbox(
-                     label="Lora trigger word",
-                     show_label=True,
-                     max_lines=1,
-                     placeholder="Enter your lora trigger word here",
-                     value="a photo of TOK"
-
-                 )
-
-                 submit_button_component = gr.Button(
-                     value='Submit', variant='primary', scale=0)
-
-             with gr.Accordion("Lora Settings", open=True):
-                 lora_path = gr.Textbox(
-                     label="Lora model path",
-                     show_label=True,
-                     max_lines=1,
-                     placeholder="Enter your model path",
-                     info="Currently, only LoRA hosted on Hugging Face'model can be loaded properly.",
-                     value="XLabs-AI/flux-RealismLora"
-                 )
-                 lora_weights = gr.Textbox(
-                     label="Lora weights",
-                     show_label=True,
-                     max_lines=1,
-                     placeholder="Enter your lora weights name",
-                     value="lora.safetensors"
-                 )
-                 lora_scale = gr.Slider(
-                     label="Lora scale",
-                     show_label=True,
-                     minimum=0,
-                     maximum=1,
-                     step=0.1,
-                     value=0.9,
-                 )
-
-             with gr.Accordion("Advanced Settings", open=True):
-
-
-                 seed_slicer_component = gr.Slider(
-                     label="Seed",
-                     minimum=0,
-                     maximum=MAX_SEED,
-                     step=1,
-                     value=42,
-                 )
-
-                 randomize_seed_checkbox_component = gr.Checkbox(
-                     label="Randomize seed", value=True)
-
-                 blur_mask = gr.Checkbox(
-                     label="if blur mask", value=False)
-                 blur_factor = gr.Slider(
-                     label="blur factor",
-                     minimum=0,
-                     maximum=50,
-                     step=1,
-                     value=33,
-                 )
-                 with gr.Row():
-                     strength_slider_component = gr.Slider(
-                         label="Strength",
-                         info="Indicates extent to transform the reference `image`. "
-                              "Must be between 0 and 1. `image` is used as a starting "
-                              "point and more noise is added the higher the `strength`.",
-                         minimum=0,
-                         maximum=1,
-                         step=0.01,
-                         value=0.85,
-                     )
-
-                     num_inference_steps_slider_component = gr.Slider(
-                         label="Number of inference steps",
-                         info="The number of denoising steps. More denoising steps "
-                              "usually lead to a higher quality image at the",
-                         minimum=1,
-                         maximum=50,
-                         step=1,
-                         value=28,
-                     )
-         with gr.Column():
-             output_image_component = ImageSlider(label="Generate image", type="pil", slider_color="pink")
-
-             with gr.Accordion("Debug", open=False):
-                 output_mask_component = gr.Image(
-                     type='pil', image_mode='RGB', label='Input mask', format="png")
-
-     submit_button_component.click(
-         fn=process,
-         inputs=[
-             input_image_editor_component,
-             image_url,
-             mask_url,
-             blur_mask,
-             blur_factor,
-             lora_path,
-             lora_weights,
-             lora_scale,
-             trigger_word,
-             input_text_component,
-             seed_slicer_component,
-             randomize_seed_checkbox_component,
-             strength_slider_component,
-             num_inference_steps_slider_component
-         ],
-         outputs=[
-             output_image_component,
-             output_mask_component
-         ]
-     )
-
- demo.launch(debug=False, show_error=True, share=False)
+ from typing import Tuple
+
+ import requests
+ import random
+ import numpy as np
+ import gradio as gr
+ import spaces
+ import torch
+ from PIL import Image
+ from diffusers import FluxInpaintPipeline
+ from huggingface_hub import login
+ import os
+ import time
+ from gradio_imageslider import ImageSlider
+
+ from diffusers import FlowMatchEulerDiscreteScheduler, AutoencoderKL
+ from diffusers.models.transformers.transformer_flux import FluxTransformer2DModel
+ from transformers import CLIPTextModel, CLIPTokenizer,T5EncoderModel, T5TokenizerFast
+ import requests
+ from io import BytesIO
+ import PIL.Image
+ import requests
+
+ MARKDOWN = """
+ # FLUX.1 Inpainting with lora
+ """
+
+ MAX_SEED = np.iinfo(np.int32).max
+ IMAGE_SIZE = 1024
+ DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
+ HF_TOKEN = os.environ.get("HF_TOKEN")
+
+ #login(token=HF_TOKEN)
+
+ #bfl_repo="black-forest-labs/FLUX.1-dev"
+ bfl_repo="camenduru/FLUX.1-dev-diffusers"
+
+ class calculateDuration:
+     def __init__(self, activity_name=""):
+         self.activity_name = activity_name
+
+     def __enter__(self):
+         self.start_time = time.time()
+         self.start_time_formatted = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(self.start_time))
+         print(f"Activity: {self.activity_name}, Start time: {self.start_time_formatted}")
+         return self
+
+     def __exit__(self, exc_type, exc_value, traceback):
+         self.end_time = time.time()
+         self.elapsed_time = self.end_time - self.start_time
+         self.end_time_formatted = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(self.end_time))
+
+         if self.activity_name:
+             print(f"Elapsed time for {self.activity_name}: {self.elapsed_time:.6f} seconds")
+         else:
+             print(f"Elapsed time: {self.elapsed_time:.6f} seconds")
+
+         print(f"Activity: {self.activity_name}, End time: {self.start_time_formatted}")
+
+
+ def remove_background(image: Image.Image, threshold: int = 50) -> Image.Image:
+     image = image.convert("RGBA")
+     data = image.getdata()
+     new_data = []
+     for item in data:
+         avg = sum(item[:3]) / 3
+         if avg < threshold:
+             new_data.append((0, 0, 0, 0))
+         else:
+             new_data.append(item)
+
+     image.putdata(new_data)
+     return image
+
+ # text_encoder = CLIPTextModel.from_pretrained(os.path.join(os.getcwd(), "flux_text_encoders/clip_l.safetensors"), torch_dtype=dtype)
+ # tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14", torch_dtype=dtype)
+ # text_encoder_2 = T5EncoderModel.from_pretrained(os.path.join(os.getcwd(), "flux_text_encoders/t5xxl_fp8_e4m3fn.safetensors"), torch_dtype=dtype)
+ # tokenizer_2 = T5TokenizerFast.from_pretrained(bfl_repo, subfolder="tokenizer_2", torch_dtype=dtype)
+ # vae = AutoencoderKL.from_pretrained(bfl_repo, subfolder="vae", torch_dtype=dtype)
+ # transformer = FluxTransformer2DModel.from_pretrained(bfl_repo, subfolder="transformer", torch_dtype=dtype)
+
+
+ pipe = FluxInpaintPipeline.from_pretrained(bfl_repo, torch_dtype=torch.bfloat16).to(DEVICE)
+
+
+ def resize_image_dimensions(
+     original_resolution_wh: Tuple[int, int],
+     maximum_dimension: int = IMAGE_SIZE
+ ) -> Tuple[int, int]:
+     width, height = original_resolution_wh
+
+     # if width <= maximum_dimension and height <= maximum_dimension:
+     #     width = width - (width % 32)
+     #     height = height - (height % 32)
+     #     return width, height
+
+     if width > height:
+         scaling_factor = maximum_dimension / width
+     else:
+         scaling_factor = maximum_dimension / height
+
+     new_width = int(width * scaling_factor)
+     new_height = int(height * scaling_factor)
+
+     new_width = new_width - (new_width % 32)
+     new_height = new_height - (new_height % 32)
+
+     return new_width, new_height
+
+
+ @spaces.GPU(duration=100)
+ def process(
+     input_image_editor: dict,
+     image_url: str,
+     mask_url: str,
+     blur_mask: bool,
+     blur_factor: int,
+     lora_path: str,
+     lora_weights: str,
+     lora_scale: float,
+     trigger_word: str,
+     input_text: str,
+     seed_slicer: int,
+     randomize_seed_checkbox: bool,
+     strength_slider: float,
+     num_inference_steps_slider: int,
+     progress=gr.Progress(track_tqdm=True)
+ ):
+     if not input_text:
+         gr.Info("Please enter a text prompt.")
+         return None, None
+
+     # default image edtiro
+     image = input_image_editor['background']
+     mask = input_image_editor['layers'][0]
+
+     if image_url:
+         print("start to fetch image from url", image_url)
+         response = requests.get(image_url)
+         response.raise_for_status()
+         image = PIL.Image.open(BytesIO(response.content))
+         print("fetch image success")
+
+     if mask_url:
+         print("start to fetch mask from url", mask_url)
+         response = requests.get(mask_url)
+         response.raise_for_status()
+         mask = PIL.Image.open(BytesIO(response.content))
+         print("fetch mask success")
+
+     if not image:
+         gr.Info("Please upload an image.")
+         return None, None
+
+     if not mask:
+         gr.Info("Please draw a mask on the image.")
+         return None, None
+     if blur_mask:
+         mask = pipe.mask_processor.blur(mask, blur_factor=blur_factor)
+
+     with calculateDuration("resize image"):
+         width, height = resize_image_dimensions(original_resolution_wh=image.size)
+         resized_image = image.resize((width, height), Image.LANCZOS)
+         resized_mask = mask.resize((width, height), Image.LANCZOS)
+
+     with calculateDuration("load lora"):
+         print(lora_path, lora_weights)
+         pipe.load_lora_weights(lora_path, weight_name=lora_weights)
+
+     if randomize_seed_checkbox:
+         seed_slicer = random.randint(0, MAX_SEED)
+     generator = torch.Generator().manual_seed(seed_slicer)
+
+     with calculateDuration("run pipe"):
+         print(input_text, width, height, strength_slider, num_inference_steps_slider, lora_scale)
+         result = pipe(
+             prompt=f"{input_text} {trigger_word}",
+             image=resized_image,
+             mask_image=resized_mask,
+             width=width,
+             height=height,
+             strength=strength_slider,
+             generator=generator,
+             num_inference_steps=num_inference_steps_slider,
+             max_sequence_length=256,
+             joint_attention_kwargs={"scale": lora_scale},
+         ).images[0]
+
+     return [resized_image, result], resized_mask
+
+
+ with gr.Blocks() as demo:
+     gr.Markdown(MARKDOWN)
+     with gr.Row():
+         with gr.Column():
+             input_image_editor_component = gr.ImageEditor(
+                 label='Image',
+                 type='pil',
+                 sources=["upload", "webcam"],
+                 image_mode='RGB',
+                 layers=False,
+                 brush=gr.Brush(colors=["#FFFFFF"], color_mode="fixed"))
+
+             image_url = gr.Textbox(
+                 label="image url",
+                 show_label=True,
+                 max_lines=1,
+                 placeholder="Enter your image url (Optional)",
+             )
+             mask_url = gr.Textbox(
+                 label="Mask image url",
+                 show_label=True,
+                 max_lines=1,
+                 placeholder="Enter your mask image url (Optional)",
+             )
+
+             with gr.Accordion("Prompt Settings", open=True):
+
+                 input_text_component = gr.Textbox(
+                     label="Inpaint prompt",
+                     show_label=True,
+                     max_lines=1,
+                     placeholder="Enter your prompt",
+                 )
+                 trigger_word = gr.Textbox(
+                     label="Lora trigger word",
+                     show_label=True,
+                     max_lines=1,
+                     placeholder="Enter your lora trigger word here",
+                     value="a photo of TOK"
+
+                 )
+
+                 submit_button_component = gr.Button(
+                     value='Submit', variant='primary', scale=0)
+
+             with gr.Accordion("Lora Settings", open=True):
+                 lora_path = gr.Textbox(
+                     label="Lora model path",
+                     show_label=True,
+                     max_lines=1,
+                     placeholder="Enter your model path",
+                     info="Currently, only LoRA hosted on Hugging Face'model can be loaded properly.",
+                     value="XLabs-AI/flux-RealismLora"
+                 )
+                 lora_weights = gr.Textbox(
+                     label="Lora weights",
+                     show_label=True,
+                     max_lines=1,
+                     placeholder="Enter your lora weights name",
+                     value="lora.safetensors"
+                 )
+                 lora_scale = gr.Slider(
+                     label="Lora scale",
+                     show_label=True,
+                     minimum=0,
+                     maximum=1,
+                     step=0.1,
+                     value=0.9,
+                 )
+
+             with gr.Accordion("Advanced Settings", open=True):
+
+
+                 seed_slicer_component = gr.Slider(
+                     label="Seed",
+                     minimum=0,
+                     maximum=MAX_SEED,
+                     step=1,
+                     value=42,
+                 )
+
+                 randomize_seed_checkbox_component = gr.Checkbox(
+                     label="Randomize seed", value=True)
+
+                 blur_mask = gr.Checkbox(
+                     label="if blur mask", value=False)
+                 blur_factor = gr.Slider(
+                     label="blur factor",
+                     minimum=0,
+                     maximum=50,
+                     step=1,
+                     value=33,
+                 )
+                 with gr.Row():
+                     strength_slider_component = gr.Slider(
+                         label="Strength",
+                         info="Indicates extent to transform the reference `image`. "
+                              "Must be between 0 and 1. `image` is used as a starting "
+                              "point and more noise is added the higher the `strength`.",
+                         minimum=0,
+                         maximum=1,
+                         step=0.01,
+                         value=0.85,
+                     )
+
+                     num_inference_steps_slider_component = gr.Slider(
+                         label="Number of inference steps",
+                         info="The number of denoising steps. More denoising steps "
+                              "usually lead to a higher quality image at the",
+                         minimum=1,
+                         maximum=50,
+                         step=1,
+                         value=28,
+                     )
+         with gr.Column():
+             output_image_component = ImageSlider(label="Generate image", type="pil", slider_color="pink")
+
+             with gr.Accordion("Debug", open=False):
+                 output_mask_component = gr.Image(
+                     type='pil', image_mode='RGB', label='Input mask', format="png")
+
+     submit_button_component.click(
+         fn=process,
+         inputs=[
+             input_image_editor_component,
+             image_url,
+             mask_url,
+             blur_mask,
+             blur_factor,
+             lora_path,
+             lora_weights,
+             lora_scale,
+             trigger_word,
+             input_text_component,
+             seed_slicer_component,
+             randomize_seed_checkbox_component,
+             strength_slider_component,
+             num_inference_steps_slider_component
+         ],
+         outputs=[
+             output_image_component,
+             output_mask_component
+         ]
+     )
+
+ demo.launch(debug=False, show_error=True)
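
Note: the diffusers calls that `process()` chains together can also be exercised outside Gradio. The sketch below is illustrative only and not part of this commit; the local file names, prompt, and seed are placeholder assumptions, and it presumes a machine with enough memory to load FLUX.1-dev in bfloat16.

```python
# Minimal standalone sketch of the same pipeline calls app.py makes in process().
# Assumptions (not in the commit): input.png / mask.png exist locally and the
# prompt, seed, and LoRA scale are arbitrary example values.
import torch
from PIL import Image
from diffusers import FluxInpaintPipeline

device = "cuda" if torch.cuda.is_available() else "cpu"
pipe = FluxInpaintPipeline.from_pretrained(
    "camenduru/FLUX.1-dev-diffusers",  # same base repo the new app.py points at
    torch_dtype=torch.bfloat16,
).to(device)

# Mirrors pipe.load_lora_weights(lora_path, weight_name=lora_weights) with the UI defaults
pipe.load_lora_weights("XLabs-AI/flux-RealismLora", weight_name="lora.safetensors")

image = Image.open("input.png").convert("RGB")   # hypothetical local files
mask = Image.open("mask.png").convert("RGB")

result = pipe(
    prompt="a photo of TOK standing on a beach",
    image=image,
    mask_image=mask,
    width=image.width - image.width % 32,        # keep dimensions divisible by 32,
    height=image.height - image.height % 32,     # as resize_image_dimensions() does
    strength=0.85,
    num_inference_steps=28,
    max_sequence_length=256,
    generator=torch.Generator().manual_seed(42),
    joint_attention_kwargs={"scale": 0.9},       # LoRA scale
).images[0]
result.save("output.png")
```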
requirements.txt CHANGED
@@ -1,10 +1,12 @@
- gradio
- spaces
- accelerate
- transformers==4.42.4
- sentencepiece
- git+https://github.com/Gothos/diffusers.git@flux-inpaint
- huggingface_hub
- peft
- gradio_imageslider
- requests
+ gradio
+ spaces
+ accelerate
+ transformers==4.42.4
+ sentencepiece
+ #git+https://github.com/Gothos/diffusers.git@flux-inpaint
+ diffusers
+ huggingface_hub
+ peft
+ gradio_imageslider
+ requests
+ numpy<2
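
The dependency change swaps the Gothos flux-inpaint fork for upstream diffusers (which ships `FluxInpaintPipeline` in recent releases) and pins NumPy below 2.0, presumably to avoid NumPy 2.x incompatibilities with the other pinned packages. A quick sanity check of the new environment, purely an illustrative sketch and not part of the Space:

```python
# Verify the swapped dependencies: upstream diffusers must expose FluxInpaintPipeline,
# and the numpy<2 pin must be honored.
import numpy
import diffusers

assert numpy.__version__.startswith("1."), "numpy<2 pin not honored"
assert hasattr(diffusers, "FluxInpaintPipeline"), (
    "this diffusers release predates Flux inpainting; upgrade diffusers"
)
print("diffusers", diffusers.__version__, "| numpy", numpy.__version__)
```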