# Copyright (c) OpenMMLab. All rights reserved.
import warnings

import torch
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmcv.ops import DeformConv2d
from mmcv.runner import BaseModule

from mmdet.core import multi_apply
from mmdet.core.utils import filter_scores_and_topk
from ..builder import HEADS
from .anchor_free_head import AnchorFreeHead

INF = 1e8


class FeatureAlign(BaseModule):
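    """Align classification features with the predicted box shape.

    A 1x1 conv maps the 4-channel box prediction to offsets for a
    deformable conv, so the receptive field of the classification
    branch follows the predicted box geometry (DCN-style alignment).
    """
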
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size=3,
                 deform_groups=4,
                 init_cfg=dict(
                     type='Normal',
                     layer='Conv2d',
                     std=0.1,
                     override=dict(
                         type='Normal', name='conv_adaption', std=0.01))):
        super(FeatureAlign, self).__init__(init_cfg)
        offset_channels = kernel_size * kernel_size * 2
        self.conv_offset = nn.Conv2d(
            4, deform_groups * offset_channels, 1, bias=False)
        self.conv_adaption = DeformConv2d(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            padding=(kernel_size - 1) // 2,
            deform_groups=deform_groups)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x, shape):
        offset = self.conv_offset(shape)
        x = self.relu(self.conv_adaption(x, offset))
        return x


@HEADS.register_module()
class FoveaHead(AnchorFreeHead):
    """FoveaBox: Beyond Anchor-based Object Detector.

    https://arxiv.org/abs/1904.03797
    """

    def __init__(self,
                 num_classes,
                 in_channels,
                 base_edge_list=(16, 32, 64, 128, 256),
                 scale_ranges=((8, 32), (16, 64), (32, 128), (64, 256),
                               (128, 512)),
                 sigma=0.4,
                 with_deform=False,
                 deform_groups=4,
                 init_cfg=dict(
                     type='Normal',
                     layer='Conv2d',
                     std=0.01,
                     override=dict(
                         type='Normal',
                         name='conv_cls',
                         std=0.01,
                         bias_prob=0.01)),
                 **kwargs):
        self.base_edge_list = base_edge_list
        self.scale_ranges = scale_ranges
        self.sigma = sigma
        self.with_deform = with_deform
        self.deform_groups = deform_groups
        super().__init__(num_classes, in_channels, init_cfg=init_cfg, **kwargs)

    def _init_layers(self):
        # box branch
        super()._init_reg_convs()
        self.conv_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)

        # cls branch
        if not self.with_deform:
            super()._init_cls_convs()
            self.conv_cls = nn.Conv2d(
                self.feat_channels, self.cls_out_channels, 3, padding=1)
        else:
            self.cls_convs = nn.ModuleList()
            self.cls_convs.append(
                ConvModule(
                    self.feat_channels, (self.feat_channels * 4),
                    3,
                    stride=1,
                    padding=1,
                    conv_cfg=self.conv_cfg,
                    norm_cfg=self.norm_cfg,
                    bias=self.norm_cfg is None))
            self.cls_convs.append(
                ConvModule((self.feat_channels * 4), (self.feat_channels * 4),
                           1,
                           stride=1,
                           padding=0,
                           conv_cfg=self.conv_cfg,
                           norm_cfg=self.norm_cfg,
                           bias=self.norm_cfg is None))
            self.feature_adaption = FeatureAlign(
                self.feat_channels,
                self.feat_channels,
                kernel_size=3,
                deform_groups=self.deform_groups)
            self.conv_cls = nn.Conv2d(
                int(self.feat_channels * 4),
                self.cls_out_channels,
                3,
                padding=1)

    def forward_single(self, x):
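        """Forward features of a single scale level.

        Returns the per-level classification score and box prediction;
        when ``with_deform`` is enabled, the exponentiated box
        prediction also drives the offsets of the feature-alignment
        module before the classification convs run.
        """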
        cls_feat = x
        reg_feat = x
        for reg_layer in self.reg_convs:
            reg_feat = reg_layer(reg_feat)
        bbox_pred = self.conv_reg(reg_feat)
        if self.with_deform:
            cls_feat = self.feature_adaption(cls_feat, bbox_pred.exp())
        for cls_layer in self.cls_convs:
            cls_feat = cls_layer(cls_feat)
        cls_score = self.conv_cls(cls_feat)
        return cls_score, bbox_pred

    def loss(self,
             cls_scores,
             bbox_preds,
             gt_bbox_list,
             gt_label_list,
             img_metas,
             gt_bboxes_ignore=None):
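        """Compute classification and box regression losses.

        Sketch of the flow: predictions from all levels are flattened,
        per-point targets come from ``get_targets``, the classification
        loss is averaged over ``num_pos + num_imgs``, and the box loss
        is computed on positive points only.
        """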
        assert len(cls_scores) == len(bbox_preds)
        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
        points = self.prior_generator.grid_priors(
            featmap_sizes,
            dtype=bbox_preds[0].dtype,
            device=bbox_preds[0].device)
        num_imgs = cls_scores[0].size(0)
        flatten_cls_scores = [
            cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels)
            for cls_score in cls_scores
        ]
        flatten_bbox_preds = [
            bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)
            for bbox_pred in bbox_preds
        ]
        flatten_cls_scores = torch.cat(flatten_cls_scores)
        flatten_bbox_preds = torch.cat(flatten_bbox_preds)
        flatten_labels, flatten_bbox_targets = self.get_targets(
            gt_bbox_list, gt_label_list, featmap_sizes, points)
        # FG cat_id: [0, num_classes - 1], BG cat_id: num_classes
        pos_inds = ((flatten_labels >= 0)
                    & (flatten_labels < self.num_classes)).nonzero().view(-1)
        num_pos = len(pos_inds)
        loss_cls = self.loss_cls(
            flatten_cls_scores, flatten_labels, avg_factor=num_pos + num_imgs)
        if num_pos > 0:
            pos_bbox_preds = flatten_bbox_preds[pos_inds]
            pos_bbox_targets = flatten_bbox_targets[pos_inds]
            pos_weights = pos_bbox_targets.new_zeros(
                pos_bbox_targets.size()) + 1.0
            loss_bbox = self.loss_bbox(
                pos_bbox_preds,
                pos_bbox_targets,
                pos_weights,
                avg_factor=num_pos)
        else:
            loss_bbox = torch.tensor(
                0,
                dtype=flatten_bbox_preds.dtype,
                device=flatten_bbox_preds.device)
        return dict(loss_cls=loss_cls, loss_bbox=loss_bbox)

    def get_targets(self, gt_bbox_list, gt_label_list, featmap_sizes, points):
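        """Compute per-point targets for a batch of images.

        Targets are computed per image by ``_get_target_single``, then
        regrouped by pyramid level and flattened so they align with the
        flattened predictions in ``loss``.
        """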
        label_list, bbox_target_list = multi_apply(
            self._get_target_single,
            gt_bbox_list,
            gt_label_list,
            featmap_size_list=featmap_sizes,
            point_list=points)
        flatten_labels = [
            torch.cat([
                labels_level_img.flatten() for labels_level_img in labels_level
            ]) for labels_level in zip(*label_list)
        ]
        flatten_bbox_targets = [
            torch.cat([
                bbox_targets_level_img.reshape(-1, 4)
                for bbox_targets_level_img in bbox_targets_level
            ]) for bbox_targets_level in zip(*bbox_target_list)
        ]
        flatten_labels = torch.cat(flatten_labels)
        flatten_bbox_targets = torch.cat(flatten_bbox_targets)
        return flatten_labels, flatten_bbox_targets

    def _get_target_single(self,
                           gt_bboxes_raw,
                           gt_labels_raw,
                           featmap_size_list=None,
                           point_list=None):
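        """Assign targets on every level for one image.

        A ground-truth box is assigned to a pyramid level when
        ``sqrt(w * h)`` falls inside that level's scale range. Inside
        each assigned box, only the central "fovea" region (shrunk by
        ``sigma``) is marked positive; regression targets are log-space
        distances to the four box sides, normalized by the level's base
        edge length.
        """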
        gt_areas = torch.sqrt((gt_bboxes_raw[:, 2] - gt_bboxes_raw[:, 0]) *
                              (gt_bboxes_raw[:, 3] - gt_bboxes_raw[:, 1]))
        label_list = []
        bbox_target_list = []
        # for each pyramid level, find the cls and box targets
        for base_len, (lower_bound, upper_bound), stride, featmap_size, \
            points in zip(self.base_edge_list, self.scale_ranges,
                          self.strides, featmap_size_list, point_list):
            # FG cat_id: [0, num_classes - 1], BG cat_id: num_classes
            points = points.view(*featmap_size, 2)
            x, y = points[..., 0], points[..., 1]
            labels = gt_labels_raw.new_zeros(featmap_size) + self.num_classes
            # init to 1 so untouched (background) targets become log(1) = 0;
            # `new_ones` avoids the uninitialized memory of `.new(...) + 1`
            bbox_targets = gt_bboxes_raw.new_ones(featmap_size[0],
                                                  featmap_size[1], 4)
            # scale assignment: a gt box belongs to this level when
            # sqrt(w * h) falls inside [lower_bound, upper_bound]
            hit_indices = ((gt_areas >= lower_bound) &
                           (gt_areas <= upper_bound)).nonzero().flatten()
            if len(hit_indices) == 0:
                label_list.append(labels)
                bbox_target_list.append(torch.log(bbox_targets))
                continue
            # sort by descending area so smaller boxes are painted last
            # and take precedence where boxes overlap
            _, hit_index_order = torch.sort(-gt_areas[hit_indices])
            hit_indices = hit_indices[hit_index_order]
            gt_bboxes = gt_bboxes_raw[hit_indices, :] / stride
            gt_labels = gt_labels_raw[hit_indices]
            half_w = 0.5 * (gt_bboxes[:, 2] - gt_bboxes[:, 0])
            half_h = 0.5 * (gt_bboxes[:, 3] - gt_bboxes[:, 1])
            # valid fovea area (left, right, top, down): the positive
            # region is the gt box shrunk around its center by `sigma`
            pos_left = torch.ceil(
                gt_bboxes[:, 0] + (1 - self.sigma) * half_w - 0.5).long(). \
                clamp(0, featmap_size[1] - 1)
            pos_right = torch.floor(
                gt_bboxes[:, 0] + (1 + self.sigma) * half_w - 0.5).long(). \
                clamp(0, featmap_size[1] - 1)
            pos_top = torch.ceil(
                gt_bboxes[:, 1] + (1 - self.sigma) * half_h - 0.5).long(). \
                clamp(0, featmap_size[0] - 1)
            pos_down = torch.floor(
                gt_bboxes[:, 1] + (1 + self.sigma) * half_h - 0.5).long(). \
                clamp(0, featmap_size[0] - 1)
            for px1, py1, px2, py2, label, (gt_x1, gt_y1, gt_x2, gt_y2) in \
                    zip(pos_left, pos_top, pos_right, pos_down, gt_labels,
                        gt_bboxes_raw[hit_indices, :]):
                labels[py1:py2 + 1, px1:px2 + 1] = label
                bbox_targets[py1:py2 + 1, px1:px2 + 1, 0] = \
                    (x[py1:py2 + 1, px1:px2 + 1] - gt_x1) / base_len
                bbox_targets[py1:py2 + 1, px1:px2 + 1, 1] = \
                    (y[py1:py2 + 1, px1:px2 + 1] - gt_y1) / base_len
                bbox_targets[py1:py2 + 1, px1:px2 + 1, 2] = \
                    (gt_x2 - x[py1:py2 + 1, px1:px2 + 1]) / base_len
                bbox_targets[py1:py2 + 1, px1:px2 + 1, 3] = \
                    (gt_y2 - y[py1:py2 + 1, px1:px2 + 1]) / base_len
            bbox_targets = bbox_targets.clamp(min=1. / 16, max=16.)
            label_list.append(labels)
            bbox_target_list.append(torch.log(bbox_targets))
        return label_list, bbox_target_list

    # Same as base_dense_head/_get_bboxes_single except self._bbox_decode
    def _get_bboxes_single(self,
                           cls_score_list,
                           bbox_pred_list,
                           score_factor_list,
                           mlvl_priors,
                           img_meta,
                           cfg,
                           rescale=False,
                           with_nms=True,
                           **kwargs):
"""Transform outputs of a single image into bbox predictions. | |
Args: | |
cls_score_list (list[Tensor]): Box scores from all scale | |
levels of a single image, each item has shape | |
(num_priors * num_classes, H, W). | |
bbox_pred_list (list[Tensor]): Box energies / deltas from | |
all scale levels of a single image, each item has shape | |
(num_priors * 4, H, W). | |
score_factor_list (list[Tensor]): Score factor from all scale | |
levels of a single image. Fovea head does not need this value. | |
mlvl_priors (list[Tensor]): Each element in the list is | |
the priors of a single level in feature pyramid, has shape | |
(num_priors, 2). | |
img_meta (dict): Image meta info. | |
cfg (mmcv.Config): Test / postprocessing configuration, | |
if None, test_cfg would be used. | |
rescale (bool): If True, return boxes in original image space. | |
Default: False. | |
with_nms (bool): If True, do nms before return boxes. | |
Default: True. | |
Returns: | |
tuple[Tensor]: Results of detected bboxes and labels. If with_nms | |
is False and mlvl_score_factor is None, return mlvl_bboxes and | |
mlvl_scores, else return mlvl_bboxes, mlvl_scores and | |
mlvl_score_factor. Usually with_nms is False is used for aug | |
test. If with_nms is True, then return the following format | |
- det_bboxes (Tensor): Predicted bboxes with shape \ | |
[num_bboxes, 5], where the first 4 columns are bounding \ | |
box positions (tl_x, tl_y, br_x, br_y) and the 5-th \ | |
column are scores between 0 and 1. | |
- det_labels (Tensor): Predicted labels of the corresponding \ | |
box with shape [num_bboxes]. | |
""" | |
        cfg = self.test_cfg if cfg is None else cfg
        assert len(cls_score_list) == len(bbox_pred_list)
        img_shape = img_meta['img_shape']
        nms_pre = cfg.get('nms_pre', -1)

        mlvl_bboxes = []
        mlvl_scores = []
        mlvl_labels = []
        for level_idx, (cls_score, bbox_pred, stride, base_len, priors) in \
                enumerate(zip(cls_score_list, bbox_pred_list, self.strides,
                              self.base_edge_list, mlvl_priors)):
            assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
            bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4)
            scores = cls_score.permute(1, 2, 0).reshape(
                -1, self.cls_out_channels).sigmoid()

            # After https://github.com/open-mmlab/mmdetection/pull/6268/,
            # this operation keeps fewer bboxes under the same `nms_pre`.
            # There is no difference in performance for most models. If you
            # find a slight drop in performance, you can set a larger
            # `nms_pre` than before.
            results = filter_scores_and_topk(
                scores, cfg.score_thr, nms_pre,
                dict(bbox_pred=bbox_pred, priors=priors))
            scores, labels, _, filtered_results = results

            bbox_pred = filtered_results['bbox_pred']
            priors = filtered_results['priors']

            bboxes = self._bbox_decode(priors, bbox_pred, base_len, img_shape)

            mlvl_bboxes.append(bboxes)
            mlvl_scores.append(scores)
            mlvl_labels.append(labels)

        return self._bbox_post_process(mlvl_scores, mlvl_labels, mlvl_bboxes,
                                       img_meta['scale_factor'], cfg, rescale,
                                       with_nms)

    def _bbox_decode(self, priors, bbox_pred, base_len, max_shape):
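        """Decode log-space distance predictions into (x1, y1, x2, y2).

        Each side is recovered as the prior point offset by
        ``base_len * exp(pred)``, e.g. ``x1 = x - base_len * exp(t_l)``,
        then clamped to the image boundary.
        """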
        bbox_pred = bbox_pred.exp()
        y = priors[:, 1]
        x = priors[:, 0]
        x1 = (x - base_len * bbox_pred[:, 0]). \
            clamp(min=0, max=max_shape[1] - 1)
        y1 = (y - base_len * bbox_pred[:, 1]). \
            clamp(min=0, max=max_shape[0] - 1)
        x2 = (x + base_len * bbox_pred[:, 2]). \
            clamp(min=0, max=max_shape[1] - 1)
        y2 = (y + base_len * bbox_pred[:, 3]). \
            clamp(min=0, max=max_shape[0] - 1)
        decoded_bboxes = torch.stack([x1, y1, x2, y2], -1)
        return decoded_bboxes

    def _get_points_single(self, *args, **kwargs):
        """Get points according to feature map size.

        This function will be deprecated soon.
        """
        warnings.warn(
            '`_get_points_single` in `FoveaHead` will be '
            'deprecated soon, we support a multi-level point generator now. '
            'You can get points of a single level feature map with '
            '`self.prior_generator.single_level_grid_priors`.')
        y, x = super()._get_points_single(*args, **kwargs)
        return y + 0.5, x + 0.5
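

# A minimal usage sketch (hypothetical values; the loss settings below follow
# common FoveaBox configs, but treat them as an assumption, not the reference):
#
#   bbox_head = dict(
#       type='FoveaHead',
#       num_classes=80,
#       in_channels=256,
#       stacked_convs=4,
#       feat_channels=256,
#       strides=[8, 16, 32, 64, 128],
#       loss_cls=dict(
#           type='FocalLoss', use_sigmoid=True, gamma=1.50, alpha=0.4,
#           loss_weight=1.0),
#       loss_bbox=dict(type='SmoothL1Loss', beta=0.11, loss_weight=1.0))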