import numpy as np

from tensorflow.keras.layers import Input

from src.yolo3.model import *
from src.yolo3.detect import *

from src.utils.image import *
from src.utils.datagen import *
from src.utils.fixes import *

fix_tf_gpu()


def prepare_model(approach):
    '''
    Build the YOLO v3 model for the given approach (1, 2, or 3)
    and load the corresponding pre-trained weights.
    '''
    global input_shape, class_names, anchor_boxes, num_classes, num_anchors, model

    # shape (height, width) of the input image
    input_shape = (416, 416)

    # class names
    if approach == 1:
        class_names = ['H', 'V', 'W']

    elif approach == 2:
        class_names = ['W', 'WH', 'WV', 'WHV']

    elif approach == 3:
        class_names = ['W']

    else:
        raise NotImplementedError('Approach should be 1, 2, or 3')

    # anchor boxes
    if approach == 1:
        anchor_boxes = np.array(
            [
            np.array([[ 76,  59], [ 84, 136], [188, 225]]) /32, # output-1 anchor boxes
            np.array([[ 25,  15], [ 46,  29], [ 27,  56]]) /16, # output-2 anchor boxes
            np.array([[ 5,    3], [ 10,   8], [ 12,  26]]) /8   # output-3 anchor boxes
            ],
            dtype='float64'
        )
    else:
        anchor_boxes = np.array(
            [
            np.array([[ 73, 158], [128, 209], [224, 246]]) /32, # output-1 anchor boxes
            np.array([[ 32,  50], [ 40, 104], [ 76,  73]]) /16, # output-2 anchor boxes
            np.array([[ 6,   11], [ 11,  23], [ 19,  36]]) /8   # output-3 anchor boxes
            ],
            dtype='float64'
        )

    # number of classes and number of anchors
    num_classes = len(class_names)
    num_anchors = anchor_boxes.shape[0] * anchor_boxes.shape[1]

    # input and output
    input_tensor = Input( shape=(input_shape[0], input_shape[1], 3) ) # input
    num_out_filters = ( num_anchors//3 ) * ( 5 + num_classes )        # output

    # build the model
    model = yolo_body(input_tensor, num_out_filters)

    # load weights
    weight_path = f'model-data/weights/pictor-ppe-v302-a{approach}-yolo-v3-weights.h5'
    model.load_weights( weight_path )


def get_detection(img):
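    '''
    Run the YOLO model on a single image and return the annotated image
    together with the detected boxes (the last element of each box is
    the class index).
    '''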
    # save a copy of the img
    act_img = img.copy()

    # shape of the image
    ih, iw = act_img.shape[:2]

    # preprocess the image
    img = letterbox_image(img, input_shape)
    img = np.expand_dims(img, 0)
    image_data = np.array(img) / 255.

    # raw prediction from yolo model
    prediction = model.predict(image_data)

    # process the raw prediction to get the bounding boxes
    boxes = detection(
        prediction,
        anchor_boxes,
        num_classes,
        image_shape=(ih, iw),
        input_shape=(416, 416),
        max_boxes=10,
        score_threshold=0.3,
        iou_threshold=0.45,
        classes_can_overlap=False)

    # convert tensor to numpy
    boxes = boxes[0].numpy()

    # draw the detection on the actual image
    return (draw_detection(act_img, boxes, class_names), boxes)



def run(image_in, approach):
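    '''
    Build the model for the chosen approach, run detection on the input
    image, and return the annotated image and a text summary of counts.
    '''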
    prepare_model(approach=approach)

    # letterbox the input image to the model's input shape
    img = letterbox_image(image_in, input_shape)

    # get the detection on the image
    img, all_classes = get_detection(img)

    # count the detections per class
    WHV = 0
    WV = 0
    WH = 0
    W = 0
    H = 0
    V = 0
    for box in all_classes:
        label = class_names[int(box[-1])]
        if label == "WHV":
            WHV += 1
            W += 1
        elif label == "WH":
            WH += 1
            W += 1
        elif label == "WV":
            WV += 1
            W += 1
        elif label == "W":
            W += 1
        elif label == "H":
            H += 1
        elif label == "V":
            V += 1

    # build a human-readable summary of the per-class counts
    texts = ""
    texts = texts + "Total workers: " + str(W) + "\n"
    if approach != 3:
        if approach == 1:
            texts = texts + "Number of Helmets: " + str(H) + "\n"
            texts = texts + "Number of Vests: " + str(V) + "\n"

        elif approach == 2:

            texts = texts + "Workers wearing helmet and vest: " + str(WHV) + "\n"
            texts = texts + "Workers wearing only vest: " + str(WV) + "\n"
            texts = texts + "Workers wearing only helmet: " + str(WH) + "\n"

            # workers missing one or both pieces of PPE
            no_helmet = W - WH - WHV    # workers without a helmet
            no_vest = W - WV - WHV      # workers without a vest
            no_ppe = W - WH - WV - WHV  # workers wearing neither

            if no_helmet > 0:
                texts = texts + "Workers not wearing helmet: " + str(no_helmet) + "\n"

            if no_vest > 0:
                texts = texts + "Workers not wearing vest: " + str(no_vest) + "\n"

            if no_ppe > 0:
                texts = texts + "Workers wearing neither helmet nor vest: " + str(no_ppe) + "\n"

    return {'img': img[:, :, ::-1], 'text': texts}
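

# Example usage (a minimal sketch): read an image from disk with OpenCV,
# run approach-2 detection, print the summary, and save the annotated
# result. The sample image path is a placeholder; cv2 is imported here
# explicitly to keep the example self-contained.
if __name__ == '__main__':
    import cv2

    # load a test image (BGR, as returned by OpenCV); the path is hypothetical
    test_img = cv2.imread('extras/sample-image.jpg')
    if test_img is None:
        raise FileNotFoundError('Could not read the sample image')

    # run detection with approach 2 (W / WH / WV / WHV classes)
    result = run(test_img, approach=2)
    print(result['text'])

    # run() returns the image with channels reversed (BGR -> RGB),
    # so flip them back before writing with OpenCV
    cv2.imwrite('output.jpg', result['img'][:, :, ::-1])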