"""
Copyright (c) 2019-present NAVER Corp.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""

import torch.nn as nn
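
# This module bundles the ResNet-based visual feature extractor used for scene
# text recognition (the FAN-style backbone) together with a minimal STRModel
# wrapper that maps an input image to per-step class logits along the width axis.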


class ResNet_FeatureExtractor(nn.Module):
    """FeatureExtractor of FAN (http://openaccess.thecvf.com/content_ICCV_2017/papers/Cheng_Focusing_Attention_Towards_ICCV_2017_paper.pdf)"""

    def __init__(self, input_channel, output_channel=512):
        super(ResNet_FeatureExtractor, self).__init__()
        self.ConvNet = ResNet(input_channel, output_channel, BasicBlock, [1, 2, 5, 3])

    def forward(self, input):
        return self.ConvNet(input)
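
# Usage sketch (illustrative only; the grayscale 32x100 text-line crop below is an
# assumed input size, not something this module enforces):
#
#   extractor = ResNet_FeatureExtractor(input_channel=1, output_channel=512)
#   feats = extractor(torch.randn(1, 1, 32, 100))  # -> approximately (1, 512, 1, 26)
#
# The backbone collapses the height dimension while keeping the width roughly
# proportional to the input, so the output reads as a left-to-right sequence of
# 512-dimensional visual features.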


class BasicBlock(nn.Module):
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = self._conv3x3(inplanes, planes)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = self._conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def _conv3x3(self, in_planes, out_planes, stride=1):
        """3x3 convolution with padding"""
        return nn.Conv2d(
            in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False
        )

    def forward(self, x):
        residual = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)

        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)

        return out
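
# The BasicBlock above computes the standard post-activation residual mapping:
#
#   out = ReLU( BN2(conv2( ReLU(BN1(conv1(x))) )) + shortcut(x) )
#
# where shortcut(x) is the identity unless a `downsample` module (1x1 conv + BN)
# is supplied to match channel counts.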


class ResNet(nn.Module):
    def __init__(self, input_channel, output_channel, block, layers):
        super(ResNet, self).__init__()

        self.output_channel_block = [
            int(output_channel / 4),
            int(output_channel / 2),
            output_channel,
            output_channel,
        ]

        self.inplanes = int(output_channel / 8)
        self.conv0_1 = nn.Conv2d(
            input_channel,
            int(output_channel / 16),
            kernel_size=3,
            stride=1,
            padding=1,
            bias=False,
        )
        self.bn0_1 = nn.BatchNorm2d(int(output_channel / 16))
        self.conv0_2 = nn.Conv2d(
            int(output_channel / 16),
            self.inplanes,
            kernel_size=3,
            stride=1,
            padding=1,
            bias=False,
        )
        self.bn0_2 = nn.BatchNorm2d(self.inplanes)
        self.relu = nn.ReLU(inplace=True)

        self.maxpool1 = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
        self.layer1 = self._make_layer(block, self.output_channel_block[0], layers[0])
        self.conv1 = nn.Conv2d(
            self.output_channel_block[0],
            self.output_channel_block[0],
            kernel_size=3,
            stride=1,
            padding=1,
            bias=False,
        )
        self.bn1 = nn.BatchNorm2d(self.output_channel_block[0])

        self.maxpool2 = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
        self.layer2 = self._make_layer(
            block, self.output_channel_block[1], layers[1], stride=1
        )
        self.conv2 = nn.Conv2d(
            self.output_channel_block[1],
            self.output_channel_block[1],
            kernel_size=3,
            stride=1,
            padding=1,
            bias=False,
        )
        self.bn2 = nn.BatchNorm2d(self.output_channel_block[1])

        self.maxpool3 = nn.MaxPool2d(kernel_size=2, stride=(2, 1), padding=(0, 1))
        self.layer3 = self._make_layer(
            block, self.output_channel_block[2], layers[2], stride=1
        )
        self.conv3 = nn.Conv2d(
            self.output_channel_block[2],
            self.output_channel_block[2],
            kernel_size=3,
            stride=1,
            padding=1,
            bias=False,
        )
        self.bn3 = nn.BatchNorm2d(self.output_channel_block[2])

        self.layer4 = self._make_layer(
            block, self.output_channel_block[3], layers[3], stride=1
        )
        self.conv4_1 = nn.Conv2d(
            self.output_channel_block[3],
            self.output_channel_block[3],
            kernel_size=2,
            stride=(2, 1),
            padding=(0, 1),
            bias=False,
        )
        self.bn4_1 = nn.BatchNorm2d(self.output_channel_block[3])
        self.conv4_2 = nn.Conv2d(
            self.output_channel_block[3],
            self.output_channel_block[3],
            kernel_size=2,
            stride=1,
            padding=0,
            bias=False,
        )
        self.bn4_2 = nn.BatchNorm2d(self.output_channel_block[3])

    def _make_layer(self, block, planes, blocks, stride=1):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(
                    self.inplanes,
                    planes * block.expansion,
                    kernel_size=1,
                    stride=stride,
                    bias=False,
                ),
                nn.BatchNorm2d(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))

        return nn.Sequential(*layers)
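
    # _make_layer sketch: the first block gets a 1x1-conv + BN branch on its shortcut
    # whenever the channel count changes (every call here uses stride=1, so only a
    # channel mismatch triggers it); the remaining `blocks - 1` blocks use plain
    # identity shortcuts. With output_channel=512, layer2 is built roughly as:
    #
    #   self._make_layer(BasicBlock, 256, 2)   # self.inplanes == 128 at this point
    #   # -> Sequential(BasicBlock(128 -> 256, 1x1-conv shortcut),
    #   #               BasicBlock(256 -> 256, identity shortcut))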

    def forward(self, x):
        x = self.conv0_1(x)
        x = self.bn0_1(x)
        x = self.relu(x)
        x = self.conv0_2(x)
        x = self.bn0_2(x)
        x = self.relu(x)

        x = self.maxpool1(x)
        x = self.layer1(x)
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)

        x = self.maxpool2(x)
        x = self.layer2(x)
        x = self.conv2(x)
        x = self.bn2(x)
        x = self.relu(x)

        x = self.maxpool3(x)
        x = self.layer3(x)
        x = self.conv3(x)
        x = self.bn3(x)
        x = self.relu(x)

        x = self.layer4(x)
        x = self.conv4_1(x)
        x = self.bn4_1(x)
        x = self.relu(x)
        x = self.conv4_2(x)
        x = self.bn4_2(x)
        x = self.relu(x)

        return x
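
# Rough shape trace through ResNet.forward for an assumed (N, 1, 32, 100) input with
# output_channel=512 (values are estimates from the kernel/stride/padding settings):
#
#   stem (conv0_1 / conv0_2):      (N,  64, 32, 100)
#   maxpool1 + layer1 + conv1:     (N, 128, 16,  50)
#   maxpool2 + layer2 + conv2:     (N, 256,  8,  25)
#   maxpool3 + layer3 + conv3:     (N, 512,  4,  26)   # stride (2, 1), padding (0, 1)
#   layer4 + conv4_1 + conv4_2:    (N, 512,  1,  26)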


class STRModel(nn.Module):
    def __init__(self, input_channels, output_channels, num_classes):
        super(STRModel, self).__init__()
        self.FeatureExtraction = ResNet_FeatureExtractor(
            input_channels, output_channels
        )
        self.FeatureExtraction_output = output_channels
        # Pools over the (channel, height) plane after the permute in forward();
        # since the channel count already equals output_channels, this behaves like
        # AdaptiveAvgPool2d((None, 1)), i.e. it only averages the height away.
        self.AdaptiveAvgPool = nn.AdaptiveAvgPool2d(
            (self.FeatureExtraction_output, 1)
        )
        self.SequenceModeling_output = self.FeatureExtraction_output
        self.Prediction = nn.Linear(self.SequenceModeling_output, num_classes)

    def forward(self, input):
        # Feature extraction stage: (N, C, H, W) visual features from the backbone.
        visual_feature = self.FeatureExtraction(input)
        visual_feature = self.AdaptiveAvgPool(
            visual_feature.permute(0, 3, 1, 2)  # (N, C, H, W) -> (N, W, C, H)
        )
        visual_feature = visual_feature.squeeze(3)  # (N, W, C)

        # Prediction stage: per-step class logits along the width (sequence) axis.
        prediction = self.Prediction(visual_feature.contiguous())  # (N, W, num_classes)

        return prediction
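

if __name__ == "__main__":
    # Minimal smoke test (illustrative sketch; the 32x100 grayscale input size and
    # the 38-class vocabulary below are assumptions, not values fixed by this module).
    import torch

    model = STRModel(input_channels=1, output_channels=512, num_classes=38)
    dummy = torch.randn(2, 1, 32, 100)
    logits = model(dummy)
    # Expected shape: (2, W, 38), where W is the output sequence length
    # (about 26 steps for a 100-pixel-wide input).
    print(logits.shape)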