import glob
import gradio as gr
import numpy as np
import torch
from os import environ
from PIL import Image
from torchvision import transforms as T
from transformers import MaskFormerForInstanceSegmentation, MaskFormerImageProcessor


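# example images bundled with the app, shown in the Gradio examples panel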
example_images = sorted(glob.glob('examples/map*.jpg'))

# standard ImageNet mean/std, also used for ADE20K-trained models
ade_mean = [0.485, 0.456, 0.406]
ade_std = [0.229, 0.224, 0.225]

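# one RGB color per class id, indexed by the ids in model.config.id2label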
palette = [
    [120, 120, 120], [4, 200, 4], [4, 4, 250], [6, 230, 230],
    [80, 50, 50], [120, 120, 80], [140, 140, 140], [204, 5, 255]
]

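# MaskFormer checkpoint fine-tuned for vegetation segmentation on satellite imagery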
model_id = "thiagohersan/maskformer-satellite-trees"

# the checkpoint's own preprocessor could be loaded with:
# preprocessor = MaskFormerImageProcessor.from_pretrained(model_id)
# resizing, rescaling and normalization are disabled here because
# test_transform below already handles rescaling and normalization
preprocessor = MaskFormerImageProcessor(
    do_resize=False,
    do_normalize=False,
    do_rescale=False,
    ignore_index=255,
    reduce_labels=False
)

# access token read from the environment, in case the model repo requires authentication
hf_token = environ.get('HFTOKEN')
model = MaskFormerForInstanceSegmentation.from_pretrained(model_id, use_auth_token=hf_token)

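# ToTensor rescales pixel values to [0, 1]; Normalize applies the ADE mean/std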
test_transform = T.Compose([
    T.ToTensor(),
    T.Normalize(mean=ade_mean, std=ade_std)
])

def visualize_instance_seg_mask(img_in, mask, id2label):
    # paint each predicted class id with its palette color and count its pixels
    img_out = np.zeros((mask.shape[0], mask.shape[1], 3))
    image_total_pixels = mask.shape[0] * mask.shape[1]
    vegetation_labels = ["vegetation"]

    label_ids, id_counts = np.unique(mask, return_counts=True)
    id2count = dict(zip(label_ids, id_counts))

    for label_id in label_ids:
        img_out[mask == label_id] = palette[label_id]

    # blend the input image with the color mask at 50% opacity
    image_res = (0.5 * img_in + 0.5 * img_out).astype(np.uint8)

    # report, for vegetation classes only: pixel percentage and the side length
    # of an equivalent square, both relative to the whole image
    dataframe = [[
        f"{id2label[id]}",
        f"{(100 * id2count[id] / image_total_pixels):.2f} %",
        f"{np.sqrt(id2count[id] / image_total_pixels):.2f} m"
        ] for id in label_ids if id2label[id] in vegetation_labels]

    # placeholder row when no vegetation is detected
    if len(dataframe) < 1:
        dataframe = [[
            "",
            f"{(0):.2f} %",
            f"{(0):.2f} m"
        ]]

    return image_res, dataframe


def query_image(image_path):
    # force RGB so images with an alpha channel don't break normalization
    img = np.array(Image.open(image_path).convert("RGB"))
    img_size = (img.shape[0], img.shape[1])
    inputs = preprocessor(images=test_transform(img), return_tensors="pt")

    # inference only, so skip gradient tracking
    with torch.no_grad():
        outputs = model(**inputs)

    # upscale the class-id mask back to the original image size
    results = preprocessor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[img_size])[0]
    mask_img, dataframe = visualize_instance_seg_mask(img, results.numpy(), model.config.id2label)
    return mask_img, dataframe


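# Gradio interface: image in; blended segmentation overlay and vegetation stats table out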
demo = gr.Interface(
    fn=query_image,
    inputs=[gr.Image(type="filepath", label="Input Image")],
    outputs=[
        gr.Image(label="Vegetation"),
        gr.DataFrame(label="Info", headers=["Object Label", "Pixel Percent", "Square Length"])
    ],
    title="Maskformer Satellite+Trees",
    allow_flagging="never",
    examples=example_images,
    cache_examples=True
)

demo.launch(show_api=False)