examples
app.py CHANGED
@@ -1,5 +1,6 @@
 from typing import Tuple
 
+import requests
 import random
 import numpy as np
 import gradio as gr
@@ -20,6 +21,34 @@ MAX_SEED = np.iinfo(np.int32).max
 IMAGE_SIZE = 1024
 DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
 
+
+EXAMPLES = [
+    [
+        {
+            "background": Image.open(requests.get("https://media.roboflow.com/spaces/doge-2-image.png", stream=True).raw),
+            "layers": [Image.open(requests.get("https://media.roboflow.com/spaces/doge-2-mask-2.png", stream=True).raw)],
+            "composite": Image.open(requests.get("https://media.roboflow.com/spaces/doge-2-composite-2.png", stream=True).raw),
+        },
+        "little lion",
+        42,
+        False,
+        0.85,
+        30
+    ],
+    [
+        {
+            "background": Image.open(requests.get("https://media.roboflow.com/spaces/doge-2-image.png", stream=True).raw),
+            "layers": [Image.open(requests.get("https://media.roboflow.com/spaces/doge-2-mask-3.png", stream=True).raw)],
+            "composite": Image.open(requests.get("https://media.roboflow.com/spaces/doge-2-composite-3.png", stream=True).raw),
+        },
+        "tattoos",
+        42,
+        False,
+        0.85,
+        30
+    ]
+]
+
 pipe = FluxInpaintPipeline.from_pretrained(
     "black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16).to(DEVICE)
 
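Note on the EXAMPLES block above: each example image is fetched over HTTP and decoded with PIL at import time, via Image.open(requests.get(url, stream=True).raw). A minimal sketch of that pattern, factored into a hypothetical load_image helper (not part of this commit), assuming requests and Pillow are installed:

import requests
from PIL import Image


def load_image(url: str) -> Image.Image:
    # Stream the response body and hand the raw file-like object to PIL,
    # the same pattern the EXAMPLES list uses inline.
    response = requests.get(url, stream=True)
    response.raise_for_status()  # fail loudly instead of decoding an error page as an image
    return Image.open(response.raw)


# background = load_image("https://media.roboflow.com/spaces/doge-2-image.png")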
@@ -49,7 +78,7 @@ def resize_image_dimensions(
     return new_width, new_height
 
 
-@spaces.GPU(duration=
+@spaces.GPU(duration=100)
 def process(
     input_image_editor: dict,
     input_text: str,
@@ -61,22 +90,22 @@ def process(
 ):
     if not input_text:
         gr.Info("Please enter a text prompt.")
-        return None
+        return None, None
 
     image = input_image_editor['background']
     mask = input_image_editor['layers'][0]
 
     if not image:
         gr.Info("Please upload an image.")
-        return None
+        return None, None
 
     if not mask:
         gr.Info("Please draw a mask on the image.")
-        return None
+        return None, None
 
     width, height = resize_image_dimensions(original_resolution_wh=image.size)
     resized_image = image.resize((width, height), Image.LANCZOS)
-    resized_mask = mask.resize((width, height), Image.
+    resized_mask = mask.resize((width, height), Image.LANCZOS)
 
     if randomize_seed_checkbox:
         seed_slicer = random.randint(0, MAX_SEED)
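Note on the return None, None changes above: Gradio maps a function's return values positionally onto the components listed in outputs, and process is now wired to two outputs (the generated image and the debug mask), both by the submit button and by the cached examples, so every early exit has to return a 2-tuple as well. A small self-contained sketch of that contract, with hypothetical component names; it is an illustration, not code from this commit:

import numpy as np
import gradio as gr


def process_stub(prompt: str):
    # One return value per component listed in `outputs`.
    if not prompt:
        return None, None  # the arity must match even on early exits
    blank = np.zeros((64, 64, 3), dtype=np.uint8)
    return blank, blank  # (generated image, input mask)


with gr.Blocks() as demo:
    prompt_box = gr.Textbox(label="Prompt")
    image_out = gr.Image(label="Generated image")
    mask_out = gr.Image(label="Input mask")
    prompt_box.submit(fn=process_stub, inputs=prompt_box, outputs=[image_out, mask_out])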
@@ -153,10 +182,29 @@ with gr.Blocks() as demo:
             )
         with gr.Column():
             output_image_component = gr.Image(
-                type='pil', image_mode='RGB', label='Generated image')
+                type='pil', image_mode='RGB', label='Generated image', format="png")
             with gr.Accordion("Debug", open=False):
                 output_mask_component = gr.Image(
-                    type='pil', image_mode='RGB', label='Input mask')
+                    type='pil', image_mode='RGB', label='Input mask', format="png")
+            with gr.Row():
+                gr.Examples(
+                    fn=process,
+                    examples=EXAMPLES,
+                    inputs=[
+                        input_image_editor_component,
+                        input_text_component,
+                        seed_slicer_component,
+                        randomize_seed_checkbox_component,
+                        strength_slider_component,
+                        num_inference_steps_slider_component
+                    ],
+                    outputs=[
+                        output_image_component,
+                        output_mask_component
+                    ],
+                    run_on_click=True,
+                    cache_examples=True
+                )
 
     submit_button_component.click(
         fn=process,
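Note on the gr.Examples block above: with cache_examples=True, Gradio runs fn on every example row ahead of time and stores the resulting outputs, so clicking an example can replay the cached image and mask instead of recomputing them; each row in EXAMPLES must line up one-to-one with the six components in inputs, and the cached call goes through process just like a normal submission. A stripped-down sketch of the same wiring, using a trivial echo function instead of the inpainting pipeline; it assumes a recent Gradio 4.x where the run_on_click/cache_examples combination used above is accepted:

import gradio as gr


def echo(text: str):
    # Stand-in for `process`: one input, one output.
    return text


with gr.Blocks() as demo:
    text_in = gr.Textbox(label="Prompt")
    text_out = gr.Textbox(label="Result")
    gr.Examples(
        fn=echo,
        examples=[["little lion"], ["tattoos"]],  # one inner list per row, one value per input
        inputs=[text_in],
        outputs=[text_out],
        run_on_click=True,
        cache_examples=True,  # echo() is executed for each row and its outputs are stored
    )
    text_in.submit(fn=echo, inputs=text_in, outputs=text_out)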