Commit 4ef12bf
1 Parent(s): 472034d

Update app.py (#4)

- Update app.py (4713eaf95f412781d5a37c855dccd9c0552640ca)

Co-authored-by: Aditya Deshmukh <AdityA36912355@users.noreply.huggingface.co>

app.py CHANGED
@@ -115,8 +115,10 @@ def predict(
     seed,
     negative_prompt,
     task,
-    vertical_expansion_ratio,
-    horizontal_expansion_ratio,
+    left_expansion_ratio,
+    right_expansion_ratio,
+    top_expansion_ratio,
+    bottom_expansion_ratio,
 ):
     size1, size2 = input_image["image"].convert("RGB").size

@@ -136,40 +138,30 @@ def predict(
     if task == "object-removal":
         prompt = prompt + " empty scene blur"

-    if vertical_expansion_ratio is not None and horizontal_expansion_ratio is not None:
+    if (
+        left_expansion_ratio is not None and right_expansion_ratio is not None
+        and top_expansion_ratio is not None and bottom_expansion_ratio is not None
+    ):
         o_W, o_H = input_image["image"].convert("RGB").size
-        c_W = int(horizontal_expansion_ratio * o_W)
-        c_H = int(vertical_expansion_ratio * o_H)
+        c_W = int((1 + left_expansion_ratio + right_expansion_ratio) * o_W)
+        c_H = int((1 + top_expansion_ratio + bottom_expansion_ratio) * o_H)

         expand_img = np.ones((c_H, c_W, 3), dtype=np.uint8) * 127
         original_img = np.array(input_image["image"])
         expand_img[
-            int((c_H - o_H) / 2.0) : int((c_H - o_H) / 2.0) + o_H,
-            int((c_W - o_W) / 2.0) : int((c_W - o_W) / 2.0) + o_W,
-            :,
+            int(top_expansion_ratio * o_H):int(top_expansion_ratio * o_H) + o_H,
+            int(left_expansion_ratio * o_W):int(left_expansion_ratio * o_W) + o_W,
+            :
         ] = original_img

         blurry_gap = 10

         expand_mask = np.ones((c_H, c_W, 3), dtype=np.uint8) * 255
-        if vertical_expansion_ratio == 1 and horizontal_expansion_ratio != 1:
-            expand_mask[
-                int((c_H - o_H) / 2.0) : int((c_H - o_H) / 2.0) + o_H,
-                int((c_W - o_W) / 2.0) + blurry_gap : int((c_W - o_W) / 2.0) + o_W - blurry_gap,
-                :,
-            ] = 0
-        elif vertical_expansion_ratio != 1 and horizontal_expansion_ratio != 1:
-            expand_mask[
-                int((c_H - o_H) / 2.0) + blurry_gap : int((c_H - o_H) / 2.0) + o_H - blurry_gap,
-                int((c_W - o_W) / 2.0) + blurry_gap : int((c_W - o_W) / 2.0) + o_W - blurry_gap,
-                :,
-            ] = 0
-        elif vertical_expansion_ratio != 1 and horizontal_expansion_ratio == 1:
-            expand_mask[
-                int((c_H - o_H) / 2.0) + blurry_gap : int((c_H - o_H) / 2.0) + o_H - blurry_gap,
-                int((c_W - o_W) / 2.0) : int((c_W - o_W) / 2.0) + o_W,
-                :,
-            ] = 0
+        expand_mask[
+            int(top_expansion_ratio * o_H) + blurry_gap:int(top_expansion_ratio * o_H) + o_H - blurry_gap,
+            int(left_expansion_ratio * o_W) + blurry_gap:int(left_expansion_ratio * o_W) + o_W - blurry_gap,
+            :
+        ] = 0

         input_image["image"] = Image.fromarray(expand_img)
         input_image["mask"] = Image.fromarray(expand_mask)
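The hunk above replaces the single vertical/horizontal pair with four per-side ratios: each ratio adds that fraction of the original size as margin on its own side, and the original image is pasted at the (top, left) offset instead of being centered. Below is a minimal, self-contained sketch of that logic, useful for checking canvas and mask sizes locally; the helper name expand_for_outpainting and the standalone form are illustrative and not part of app.py.

# Standalone sketch of the per-side outpainting expansion introduced above (illustrative).
import numpy as np
from PIL import Image


def expand_for_outpainting(image, left, right, top, bottom, blurry_gap=10):
    # Gray canvas sized by the per-side ratios, with the original pasted at the (top, left) offset.
    image = image.convert("RGB")
    o_W, o_H = image.size
    c_W = int((1 + left + right) * o_W)
    c_H = int((1 + top + bottom) * o_H)

    expand_img = np.ones((c_H, c_W, 3), dtype=np.uint8) * 127
    expand_img[
        int(top * o_H):int(top * o_H) + o_H,
        int(left * o_W):int(left * o_W) + o_W,
        :
    ] = np.array(image)

    # White mask everywhere, zeroed over the kept interior of the original
    # (shrunk by blurry_gap so the border blends into the outpainted region).
    expand_mask = np.ones((c_H, c_W, 3), dtype=np.uint8) * 255
    expand_mask[
        int(top * o_H) + blurry_gap:int(top * o_H) + o_H - blurry_gap,
        int(left * o_W) + blurry_gap:int(left * o_W) + o_W - blurry_gap,
        :
    ] = 0
    return Image.fromarray(expand_img), Image.fromarray(expand_mask)


# Example: add half the original width as margin on the right side only.
img = Image.new("RGB", (512, 512), "white")
expanded, mask = expand_for_outpainting(img, left=0.0, right=0.5, top=0.0, bottom=0.0)
print(expanded.size, mask.size)  # (768, 512) (768, 512)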
@@ -184,11 +176,16 @@ def predict(

     np_inpimg = np.array(input_image["image"])
     np_inmask = np.array(input_image["mask"]) / 255.0
+    if len(np_inmask.shape)==2:
+        np_inmask = np.expand_dims(np_inmask, axis=-1)
+    # return np_inpimg, np_inmask

     np_inpimg = np_inpimg * (1 - np_inmask)

     input_image["image"] = Image.fromarray(np_inpimg.astype(np.uint8)).convert("RGB")

+
+    # return input_image
     set_seed(seed)
     global pipe
     result = pipe(
@@ -245,8 +242,10 @@ def infer(
     scale,
     seed,
     task,
-    vertical_expansion_ratio,
-    horizontal_expansion_ratio,
+    left_expansion_ratio,
+    right_expansion_ratio,
+    top_expansion_ratio,
+    bottom_expansion_ratio,
     outpaint_prompt,
     outpaint_negative_prompt,
     removal_prompt,
@@ -278,8 +277,10 @@ def infer(
             seed,
             negative_prompt,
             task,
-            vertical_expansion_ratio,
-            horizontal_expansion_ratio,
+            left_expansion_ratio,
+            right_expansion_ratio,
+            top_expansion_ratio,
+            bottom_expansion_ratio
         )
     else:
         task = "text-guided"
@@ -371,15 +372,29 @@ with gr.Blocks(css="style.css") as demo:
                 )
                 outpaint_prompt = gr.Textbox(label="Outpainting_prompt")
                 outpaint_negative_prompt = gr.Textbox(label="Outpainting_negative_prompt")
-                vertical_expansion_ratio = gr.Slider(
-                    label="vertical expansion ratio",
+                left_expansion_ratio = gr.Slider(
+                    label="left expansion ratio",
+                    minimum=1,
+                    maximum=4,
+                    step=0.05,
+                    value=1,
+                )
+                right_expansion_ratio = gr.Slider(
+                    label="right expansion ratio",
+                    minimum=1,
+                    maximum=4,
+                    step=0.05,
+                    value=1,
+                )
+                top_expansion_ratio = gr.Slider(
+                    label="top expansion ratio",
                     minimum=1,
                     maximum=4,
                     step=0.05,
                     value=1,
                 )
-                horizontal_expansion_ratio = gr.Slider(
-                    label="horizontal expansion ratio",
+                bottom_expansion_ratio = gr.Slider(
+                    label="bottom expansion ratio",
                     minimum=1,
                     maximum=4,
                     step=0.05,
@@ -433,8 +448,10 @@ with gr.Blocks(css="style.css") as demo:
                 scale,
                 seed,
                 task,
-                vertical_expansion_ratio,
-                horizontal_expansion_ratio,
+                left_expansion_ratio,
+                right_expansion_ratio,
+                top_expansion_ratio,
+                bottom_expansion_ratio,
                 outpaint_prompt,
                 outpaint_negative_prompt,
                 removal_prompt,
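The other functional change in predict (the first hunk after the outpainting block) normalizes the mask to [0, 1] and, when it arrives as a single-channel 2-D array, adds a trailing channel axis so it broadcasts against the H x W x 3 image when the masked region is blanked before inpainting. A minimal sketch of that pre-processing, assuming a PIL image/mask pair like input_image in app.py; the variable names here are illustrative.

# Sketch of the mask pre-processing added before the pipeline call (illustrative only).
import numpy as np
from PIL import Image

image = Image.new("RGB", (64, 64), "white")
mask = Image.new("L", (64, 64), 255)           # single-channel mask -> 2-D array

np_inpimg = np.array(image)
np_inmask = np.array(mask) / 255.0
if len(np_inmask.shape) == 2:                  # (H, W) -> (H, W, 1) so it broadcasts over RGB
    np_inmask = np.expand_dims(np_inmask, axis=-1)

np_inpimg = np_inpimg * (1 - np_inmask)        # zero out the masked region
result = Image.fromarray(np_inpimg.astype(np.uint8)).convert("RGB")
print(result.size)  # (64, 64)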