liuyizhang committed
Commit • ed4b6a1 • 1 Parent(s): e12d135
update app.py
app.py CHANGED
@@ -12,7 +12,7 @@ else:
     # run_gradio = True
 
     if run_gradio:
-        os.system("pip install gradio==3.
+        os.system("pip install gradio==3.50.2")
 
     import gradio as gr
 
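This hunk pins the Gradio version the Space installs at startup to 3.50.2. A minimal sketch of the same idea, assuming a run_gradio flag like the one defined earlier in app.py (the version check via importlib.metadata is an addition for illustration, not part of the commit):

import importlib.metadata
import os

run_gradio = True  # stand-in for the flag set near the top of app.py

if run_gradio:
    try:
        installed = importlib.metadata.version("gradio")
    except importlib.metadata.PackageNotFoundError:
        installed = None
    if installed != "3.50.2":
        # same pinned version the commit installs unconditionally
        os.system("pip install gradio==3.50.2")

    import gradio as gr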
@@ -346,21 +346,29 @@ def load_lama_cleaner_model(device):
 
 def lama_cleaner_process(image, mask, cleaner_size_limit=1080):
     try:
+        logger.info(f'_______lama_cleaner_process_______1____')
         ori_image = image
         if mask.shape[0] == image.shape[1] and mask.shape[1] == image.shape[0] and mask.shape[0] != mask.shape[1]:
             # rotate image
+            logger.info(f'_______lama_cleaner_process_______2____')
             ori_image = np.transpose(image[::-1, ...][:, ::-1], axes=(1, 0, 2))[::-1, ...]
+            logger.info(f'_______lama_cleaner_process_______3____')
             image = ori_image
 
+        logger.info(f'_______lama_cleaner_process_______4____')
         original_shape = ori_image.shape
+        logger.info(f'_______lama_cleaner_process_______5____')
         interpolation = cv2.INTER_CUBIC
 
         size_limit = cleaner_size_limit
         if size_limit == -1:
+            logger.info(f'_______lama_cleaner_process_______6____')
             size_limit = max(image.shape)
         else:
+            logger.info(f'_______lama_cleaner_process_______7____')
             size_limit = int(size_limit)
 
+        logger.info(f'_______lama_cleaner_process_______8____')
         config = lama_Config(
             ldm_steps=25,
             ldm_sampler='plms',
@@ -385,22 +393,30 @@ def lama_cleaner_process(image, mask, cleaner_size_limit=1080):
             cv2_radius=5,
         )
 
+        logger.info(f'_______lama_cleaner_process_______9____')
         if config.sd_seed == -1:
             config.sd_seed = random.randint(1, 999999999)
 
         # logger.info(f"Origin image shape_0_: {original_shape} / {size_limit}")
+        logger.info(f'_______lama_cleaner_process_______10____')
         image = resize_max_size(image, size_limit=size_limit, interpolation=interpolation)
         # logger.info(f"Resized image shape_1_: {image.shape}")
 
         # logger.info(f"mask image shape_0_: {mask.shape} / {type(mask)}")
+        logger.info(f'_______lama_cleaner_process_______11____')
         mask = resize_max_size(mask, size_limit=size_limit, interpolation=interpolation)
         # logger.info(f"mask image shape_1_: {mask.shape} / {type(mask)}")
 
+        logger.info(f'_______lama_cleaner_process_______12____')
         res_np_img = lama_cleaner_model(image, mask, config)
+        logger.info(f'_______lama_cleaner_process_______13____')
         torch.cuda.empty_cache()
 
+        logger.info(f'_______lama_cleaner_process_______14____')
         image = Image.open(io.BytesIO(numpy_to_bytes(res_np_img, 'png')))
+        logger.info(f'_______lama_cleaner_process_______15____')
     except Exception as e:
+        logger.info(f'lama_cleaner_process[Error]:' + str(e))
         image = None
     return image
 
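These two hunks thread numbered logger.info checkpoints through lama_cleaner_process and log the exception text instead of failing silently, so the Space logs show how far the cleaner got before an error. The rotation branch they instrument only fires when the mask arrives with height and width swapped relative to the image; a small self-contained check (the sample shapes are illustrative) confirms that the transpose expression from the diff yields an image whose shape matches such a mask:

import numpy as np

image = np.zeros((480, 640, 3), dtype=np.uint8)  # H=480, W=640
mask = np.zeros((640, 480), dtype=np.uint8)      # mask produced in the other orientation

if mask.shape[0] == image.shape[1] and mask.shape[1] == image.shape[0] and mask.shape[0] != mask.shape[1]:
    # same expression as in the diff: flip, flip, swap axes, flip -> a 90-degree rotation
    rotated = np.transpose(image[::-1, ...][:, ::-1], axes=(1, 0, 2))[::-1, ...]
    assert rotated.shape[:2] == mask.shape       # (640, 480)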
@@ -863,7 +879,8 @@ def main_gradio(args):
             if kosmos_enable:
                 task_types.append("Kosmos-2")
 
-            input_image = gr.Image(source='upload', elem_id="image_upload", tool='sketch', type='pil', label="Upload"
+            input_image = gr.Image(source='upload', elem_id="image_upload", tool='sketch', type='pil', label="Upload",
+                                   height=512, brush_color='#00FFFF', mask_opacity=0.6)
             task_type = gr.Radio(task_types, value="detection",
                                  label='Task type', visible=True)
             mask_source_radio = gr.Radio([mask_source_draw, mask_source_segment],
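The rebuilt upload widget fixes the component height and the sketch brush. In Gradio 3.50.x an Image input with tool='sketch' passes the handler a dict holding the uploaded image and the drawn mask; a minimal standalone sketch of that behaviour (the inspect handler and gr.Interface wiring are illustrative, not the app's actual Blocks layout):

import gradio as gr

def inspect(sketch):
    image = sketch["image"]  # PIL image, because type='pil'
    mask = sketch["mask"]    # PIL image of the brush strokes
    return f"image {image.size}, mask {mask.size}"

demo = gr.Interface(
    fn=inspect,
    inputs=gr.Image(source='upload', tool='sketch', type='pil', label="Upload"),
    outputs="text",
)
# demo.launch()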
@@ -894,7 +911,7 @@ def main_gradio(args):
                 remove_mask_extend = gr.Textbox(label="remove_mask_extend", value='10')
 
             with gr.Column():
-                image_gallery = gr.Gallery(label="result images", show_label=True, elem_id="gallery", visible=True
+                image_gallery = gr.Gallery(label="result images", show_label=True, elem_id="gallery", height=512, visible=True
                 ).style(preview=True, columns=[5], object_fit="scale-down", height="auto")
                 time_cost = gr.Textbox(label="Time cost by step (ms):", visible=False, interactive=False)
 
@@ -1011,8 +1028,10 @@ class API_Starter:
     def handle_data(self, data):
         im_b64 = data['img']
         img = base64_to_PILImage(im_b64)
+        remove_texts = data['remove_texts']
+        remove_mask_extend = data['mask_extend']
         results = run_anything_task(input_image = img,
-                                    text_prompt =
+                                    text_prompt = f"{remove_texts}",
                                     task_type = 'remove',
                                     inpaint_prompt = '',
                                     box_threshold = 0.3,
@@ -1021,7 +1040,7 @@
                                     inpaint_mode = "merge",
                                     mask_source_radio = "type what to detect below",
                                     remove_mode = "rectangle",    # ["segment", "rectangle"]
-                                    remove_mask_extend = f"{
+                                    remove_mask_extend = f"{remove_mask_extend}",
                                     num_relation = 5,
                                     kosmos_input = None,
                                     cleaner_size_limit = -1,
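On the API path, handle_data now reads the removal prompt and mask extension from the request body instead of using hard-coded values. A minimal client sketch; the URL and the requests-based transport are assumptions, only the 'img', 'remove_texts', and 'mask_extend' field names come from the diff:

import base64
import requests

with open("input.jpg", "rb") as f:
    im_b64 = base64.b64encode(f.read()).decode("utf-8")

payload = {
    "img": im_b64,                # decoded by base64_to_PILImage in handle_data
    "remove_texts": "watermark",  # forwarded to run_anything_task as text_prompt
    "mask_extend": "10",          # forwarded as remove_mask_extend
}
resp = requests.post("http://localhost:8000/", json=payload)  # hypothetical endpoint
print(resp.status_code)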
@@ -1107,20 +1126,12 @@ if __name__ == "__main__":
     if os.environ.get('IS_MY_DEBUG') is None:
         os.system("pip list")
 
-    # print(f'groundingdino_model__{get_model_device(groundingdino_model)}')
-    # print(f'sam_model__{get_model_device(sam_model)}')
-    # print(f'sd_model__{get_model_device(sd_model)}')
-    # print(f'lama_cleaner_model__{get_model_device(lama_cleaner_model)}')
-    # print(f'ram_model__{get_model_device(ram_model)}')
-    # print(f'kosmos_model__{get_model_device(kosmos_model)}')
-
     if run_gradio:
         # Provide gradio services
         main_gradio(args)
     else:
         # Provide API services
         main_api(args)
-
-
+
 
 