import numpy as np
from tensorflow.keras.layers import Input

from src.yolo3.model import *
from src.yolo3.detect import *
from src.utils.image import *
from src.utils.datagen import *
from src.utils.fixes import *

fix_tf_gpu()

def prepare_model(approach):
    '''
    Prepare the YOLO model
    '''
    global input_shape, class_names, anchor_boxes, num_classes, num_anchors, model
    # shape (height, width) of the input image
    input_shape = (416, 416)
    # class names
    if approach == 1:
        class_names = ['H', 'V', 'W']
    elif approach == 2:
        class_names = ['W', 'WH', 'WV', 'WHV']
    elif approach == 3:
        class_names = ['W']
    else:
        raise NotImplementedError('Approach should be 1, 2, or 3')
    # anchor boxes
    if approach == 1:
        anchor_boxes = np.array(
            [
                np.array([[ 76,  59], [ 84, 136], [188, 225]]) / 32,  # output-1 anchor boxes
                np.array([[ 25,  15], [ 46,  29], [ 27,  56]]) / 16,  # output-2 anchor boxes
                np.array([[  5,   3], [ 10,   8], [ 12,  26]]) / 8    # output-3 anchor boxes
            ],
            dtype='float64'
        )
    else:
        anchor_boxes = np.array(
            [
                np.array([[ 73, 158], [128, 209], [224, 246]]) / 32,  # output-1 anchor boxes
                np.array([[ 32,  50], [ 40, 104], [ 76,  73]]) / 16,  # output-2 anchor boxes
                np.array([[  6,  11], [ 11,  23], [ 19,  36]]) / 8    # output-3 anchor boxes
            ],
            dtype='float64'
        )
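    # NOTE: each row above holds (width, height) box priors in pixels for a
    # 416x416 input; dividing by 32, 16 and 8 expresses them in grid cells of
    # the three YOLO output scales (strides 32, 16 and 8, respectively).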
    # number of classes and number of anchors
    num_classes = len(class_names)
    num_anchors = anchor_boxes.shape[0] * anchor_boxes.shape[1]
    # input and output
    input_tensor = Input(shape=(input_shape[0], input_shape[1], 3))  # input
    num_out_filters = (num_anchors // 3) * (5 + num_classes)         # output
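    # e.g. with 9 anchors (3 per output scale) and 3 classes (approach 1), each
    # scale predicts 3 * (5 + 3) = 24 filters: 3 anchors x (4 box offsets +
    # 1 objectness score + 3 class scores)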
    # build the model
    model = yolo_body(input_tensor, num_out_filters)

    # load weights
    weight_path = f'model-data/weights/pictor-ppe-v302-a{approach}-yolo-v3-weights.h5'
    model.load_weights(weight_path)

def get_detection(img):
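    '''
    Run the prepared YOLO model on a single image and return the annotated
    image together with the detected boxes.
    '''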
    # save a copy of the img
    act_img = img.copy()

    # shape of the image
    ih, iw = act_img.shape[:2]

    # preprocess the image
    img = letterbox_image(img, input_shape)
    img = np.expand_dims(img, 0)
    image_data = np.array(img) / 255.
    # raw prediction from the yolo model
    prediction = model.predict(image_data)

    # process the raw prediction to get the bounding boxes
    boxes = detection(
        prediction,
        anchor_boxes,
        num_classes,
        image_shape=(ih, iw),
        input_shape=(416, 416),
        max_boxes=10,
        score_threshold=0.3,
        iou_threshold=0.45,
        classes_can_overlap=False)
    # convert tensor to numpy
    boxes = boxes[0].numpy()

    # draw the detection on the actual image
    return (draw_detection(act_img, boxes, class_names), boxes)
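
# run() prepares the model for the chosen approach, runs detection on one image,
# and tallies the detections per class; the last element of each returned box is
# the predicted class index, which is what the tally below relies on.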
def run(image_in, approach):
    prepare_model(approach=approach)

    # input_shape = (416, 416)
    img = letterbox_image(image_in, input_shape)

    # get the detection on the image
    img, all_classes = get_detection(img)
    # print(all_classes)

    # per-class counters
    WHV = 0
    WV = 0
    WH = 0
    W = 0
    H = 0
    V = 0
    for i in all_classes:
        if class_names[int(i[-1])] == "WHV":
            WHV += 1
            W += 1
        elif class_names[int(i[-1])] == "WH":
            WH += 1
            W += 1
        elif class_names[int(i[-1])] == "H":
            H += 1
        elif class_names[int(i[-1])] == "V":
            V += 1
        elif class_names[int(i[-1])] == "WV":
            WV += 1
            W += 1
        elif class_names[int(i[-1])] == "W":
            W += 1
    # output the number of each class in an interpretable format
    texts = ""
    texts = texts + "Total workers: " + str(W) + "\n"
    if approach != 3:
        if approach == 1:
            texts = texts + "Number of Helmets: " + str(H) + "\n"
            texts = texts + "Number of Vests: " + str(V) + "\n"
        elif approach == 2:
            texts = texts + "Workers wearing helmet and vest: " + str(WHV) + "\n"
            texts = texts + "Workers wearing only vest: " + str(WV) + "\n"
            texts = texts + "Workers wearing only helmet: " + str(WH) + "\n"
            if (W > WHV) and (WHV != 0):
                texts = texts + "Workers not wearing both helmet and vest: " + str(W - WHV) + "\n"
            if (W > WH) and (WH != 0):
                texts = texts + "Workers not wearing only a helmet: " + str(W - WH) + "\n"
            if (W > WV) and (WV != 0):
                texts = texts + "Workers not wearing only a vest: " + str(W - WV) + "\n"
    return {'img': img[:, :, ::-1], 'text': texts}
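

# Minimal usage sketch, assuming images are read with OpenCV in BGR order (which
# matches the channel flip done at the end of run()); the file names below are
# hypothetical placeholders.
if __name__ == '__main__':
    import cv2

    image = cv2.imread('sample.jpg')    # hypothetical input image (BGR)
    result = run(image, approach=2)     # detect workers and their PPE
    print(result['text'])               # human-readable per-class summary
    # flip RGB back to BGR before saving with OpenCV
    cv2.imwrite('output.jpg', result['img'][:, :, ::-1])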