import cv2
import numpy as np
import math
import torch
import random
from PIL import Image

from torch.utils.data import DataLoader
from torchvision.transforms import Resize

torch.manual_seed(12345)
random.seed(12345)
np.random.seed(12345)

device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
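
# WireframeExtractor isolates the rectangular wire frame in the photo: it thresholds a
# blue/green hue band in HSV space, inverts the mask, and keeps the largest contour that
# contains the image centre; its bounding rectangle is later used to crop the image.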

class WireframeExtractor:

    def __call__(self, image: np.ndarray):
        """
        Extract corners of wireframe from a barnacle image
        :param image: Numpy RGB image of shape (W, H, 3)
        :return [x1, y1, x2, y2]
        """
        h, w = image.shape[:2]
        imghsv = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
        hsvblur = cv2.GaussianBlur(imghsv, (9, 9), 0)

        lower = np.array([70, 20, 20])
        upper = np.array([130, 255, 255])

        color_mask = cv2.inRange(hsvblur, lower, upper)

        invert = cv2.bitwise_not(color_mask)

        contours, _ = cv2.findContours(invert, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)

        max_contour = contours[0]
        largest_area = 0
        for index, contour in enumerate(contours):
            area = cv2.contourArea(contour)
            if area > largest_area:
                if cv2.pointPolygonTest(contour, (w / 2, h / 2), False) == 1:
                    largest_area = area
                    max_contour = contour

        x, y, w, h = cv2.boundingRect(max_contour)
        return x, y, w, h

wireframe_extractor = WireframeExtractor()

def show_anns(anns):
    """Overlay SAM masks on the current matplotlib axes (optional helper, currently unused)."""
    if len(anns) == 0:
        return
    sorted_anns = sorted(anns, key=(lambda x: x['area']), reverse=True)
    ax = plt.gca()
    ax.set_autoscale_on(False)
    for ann in sorted_anns:
        m = ann['segmentation']
        img = np.ones((m.shape[0], m.shape[1], 3))
        color_mask = np.random.random((1, 3)).tolist()[0]
        for i in range(3):
            img[:,:,i] = color_mask[i]
        ax.imshow(np.dstack((img, m*0.35)))
    
from segment_anything import sam_model_registry, SamAutomaticMaskGenerator, SamPredictor
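
# Load the SAM ViT-H model ("default" in the model registry). The checkpoint file
# sam_vit_h_4b8939.pth is not bundled here and must be downloaded separately (see the
# segment-anything repository) and placed next to this script.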

model = sam_model_registry["default"](checkpoint="./sam_vit_h_4b8939.pth")
model.to(device)

predictor = SamPredictor(model)

mask_generator = SamAutomaticMaskGenerator(model)

import gradio as gr

import matplotlib.pyplot as plt
import io
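
# Barnacles are roughly circular, so SAM masks are filtered with the circularity measure
# 4*pi*area / perimeter**2, which equals 1.0 for a perfect circle.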

def check_circularity(segmentation):
    """Return True if the mask's outer contour is roughly circular, otherwise False."""
    img_u8 = segmentation.astype(np.uint8)
    im_gauss = cv2.GaussianBlur(img_u8, (5, 5), 0)
    ret, thresh = cv2.threshold(im_gauss, 0, 255, cv2.THRESH_BINARY)
    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    if not contours:
        return False
    con = contours[0]
    perimeter = cv2.arcLength(con, True)
    area = cv2.contourArea(con)
    if perimeter == 0:
        return False
    circularity = 4 * math.pi * (area / (perimeter * perimeter))
    return 0.8 < circularity < 1.2
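
# count_barnacles: crop the photo to the wire frame, split the crop into a split_num x
# split_num grid of tiles, run SAM automatic mask generation on each tile, keep masks that
# are roughly circular with an area between 500 and 10,000 pixels, and mark each kept
# mask's centre on the output plot.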

def count_barnacles(image_raw, split_num=3, progress=gr.Progress()):
    progress(0, desc="Finding bounding wire")

    corners = wireframe_extractor(image_raw)
    print(corners)  # e.g. (x, y, w, h) = (0, 0, 1254, 1152)

    cropped_image = image_raw[corners[1]:corners[3]+corners[1], corners[0]:corners[2]+corners[0], :]

    print(cropped_image.shape)

    # Tile the cropped image into a split_num x split_num grid.
    x_inc = int(cropped_image.shape[1] / split_num)  # tile width (columns)
    y_inc = int(cropped_image.shape[0] / split_num)  # tile height (rows)
    startx = -x_inc

    mask_counter = 0
    good_masks = []
    centers = []

    for r in range(0, split_num):
        startx += x_inc
        starty = -y_inc
        for c in range(0, split_num):
            starty += y_inc

            small_image = cropped_image[starty:starty+y_inc, startx:startx+x_inc, :]

            # plt.figure()
            # plt.imshow(small_image)
            # plt.axis('on')
            mask_generator.predictor.set_image(small_image)
            progress(0, desc=f"Generating masks for crop {r*split_num + c}/{split_num ** 2}")
            masks = mask_generator.generate(small_image)
            num_masks = len(masks)
            
            for idx, mask in enumerate(masks):
                progress(float(idx)/float(num_masks), desc=f"Processing masks for crop {r*split_num + c}/{split_num ** 2}")
                circular = check_circularity(mask['segmentation'])
                if circular and 500 < mask['area'] < 10000:
                    mask_counter += 1
                    good_masks.append(mask)
                    box = mask['bbox']
                    centers.append((box[0] + box[2]/2 + corners[0] + startx, box[1] + box[3]/2 + corners[1] + starty))

    
    progress(0, desc="Generating Plot")
    # Create a figure with a size of 40 inches by 40 inches
    fig = plt.figure(figsize=(40, 40))

    # Display the image using the imshow() function
    # plt.imshow(cropped_image)
    plt.imshow(image_raw)

    # Call the custom function show_anns() to plot annotations on top of the image
    # show_anns(good_masks)

    for coord in centers: 
        plt.scatter(coord[0], coord[1], marker="x", color="red", s=32)

    # Turn off the axis
    plt.axis('off')

    # Get the plot as a numpy array
    # buf = io.BytesIO()
    # plt.savefig(buf, format='png', bbox_inches='tight', pad_inches=0)
    # buf.seek(0)
    # img_arr = np.frombuffer(buf.getvalue(), dtype=np.uint8)
    # buf.close()

    # # Decode the numpy array to an image
    # annotated = cv2.imdecode(img_arr, 1)
    # annotated = cv2.cvtColor(annotated, cv2.COLOR_BGR2RGB)

    # # Close the figure
    # plt.close(fig)
    
    
    # return annotated, mask_counter, centers
    return fig, mask_counter, centers
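
# Gradio front end: the user uploads an image and gets back the annotated plot, the
# predicted barnacle count, and a table of mask-centre coordinates.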

demo = gr.Interface(count_barnacles,
                    inputs=[
                        gr.Image(type="numpy", label="Input Image"),
                    ],
                    outputs=[
                        # gr.Image(type="numpy", label="Annotated Image"),
                        gr.Plot(label="Annotated Image"),
                        gr.Number(label="Predicted Number of Barnacles"),
                        gr.Dataframe(type="array", headers=["x", "y"], label="Mask centers")
                        # gr.Number(label="Actual Number of Barnacles"),
                        # gr.Number(label="Custom Metric")
                    ])
                    # examples="examples")
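# queue(concurrency_count=1) processes one request at a time; launch() then serves the app
# on Gradio's default local port unless configured otherwise.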
demo.queue(concurrency_count=1).launch()