Introduction to DCNv2
The superior performance of Deformable ConvNets comes from their ability to adapt to the geometric variations of objects. Examining their adaptive behavior, we observe that while the spatial support of their neural features conforms more closely to object structure than in a regular ConvNet, this support may still extend well beyond the region of interest, causing the features to be influenced by irrelevant image content. To address this problem, the authors present a reformulation of Deformable ConvNets that, through increased modeling power and stronger training, improves its ability to focus on pertinent image regions. Modeling power is enhanced by integrating deformable convolution more comprehensively throughout the network and by introducing a modulation mechanism that expands the scope of deformation modeling. To make effective use of this enriched modeling capability, network training is guided by a proposed feature mimicking scheme, which helps the network learn features that reflect the object focus and classification power of R-CNN features. With these contributions, the new version of Deformable ConvNets yields significant performance gains over the original model and achieves leading results on the COCO benchmarks for object detection and instance segmentation.
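In a nutshell, the modulated deformable convolution at the core of DCNv2 computes, for each output location p (following the paper's notation):

y(p) = Σ_k w_k · x(p + p_k + Δp_k) · Δm_k

where p_k ranges over the K regular sampling positions of the kernel grid (K = 9 for a 3×3 kernel), Δp_k is a learned sampling offset, and Δm_k ∈ [0, 1] is a learned modulation scalar, both predicted from the input feature map. The modulation term is what is new relative to DCNv1: the network can not only shift where it samples, but also down-weight or effectively switch off samples that land on irrelevant content.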
Original paper: Deformable ConvNets v2: More Deformable, Better Results
Code Implementation
PyTorch implementation of the DCNv2 module
import math

import torch
import torch.nn as nn
import torchvision  # noqa: F401  (importing torchvision registers the torchvision::deform_conv2d op used below)

# `Conv` is the standard Ultralytics Conv block (Conv2d + BN + SiLU). If this code is placed inside
# ultralytics/nn/modules it is already in scope; otherwise import it (the path can differ between versions):
from ultralytics.nn.modules.conv import Conv


class DCNv2(nn.Module):
    """Modulated deformable convolution (DCNv2) followed by BatchNorm and the default activation."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=1, dilation=1, groups=1, deformable_groups=1):
        super(DCNv2, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = (kernel_size, kernel_size)
        self.stride = (stride, stride)
        self.padding = (padding, padding)
        self.dilation = (dilation, dilation)
        self.groups = groups
        self.deformable_groups = deformable_groups

        # Weight and bias of the deformable convolution itself.
        # deform_conv2d expects the weight shape (out_channels, in_channels // groups, kH, kW).
        self.weight = nn.Parameter(
            torch.empty(out_channels, in_channels // groups, *self.kernel_size)
        )
        self.bias = nn.Parameter(torch.empty(out_channels))

        # A regular conv predicts x-offsets, y-offsets and modulation masks:
        # 3 * deformable_groups * kH * kW channels in total.
        out_channels_offset_mask = (self.deformable_groups * 3 *
                                    self.kernel_size[0] * self.kernel_size[1])
        self.conv_offset_mask = nn.Conv2d(
            self.in_channels,
            out_channels_offset_mask,
            kernel_size=self.kernel_size,
            stride=self.stride,
            padding=self.padding,
            bias=True,
        )
        self.bn = nn.BatchNorm2d(out_channels)
        self.act = Conv.default_act
        self.reset_parameters()

    def forward(self, x):
        # Predict offsets and modulation masks from the input feature map.
        offset_mask = self.conv_offset_mask(x)
        o1, o2, mask = torch.chunk(offset_mask, 3, dim=1)
        offset = torch.cat((o1, o2), dim=1)
        mask = torch.sigmoid(mask)  # modulation scalars in [0, 1]
        x = torch.ops.torchvision.deform_conv2d(
            x,
            self.weight,
            offset,
            mask,
            self.bias,
            self.stride[0], self.stride[1],
            self.padding[0], self.padding[1],
            self.dilation[0], self.dilation[1],
            self.groups,
            self.deformable_groups,
            True,  # use_mask: enables the DCNv2 modulation term
        )
        x = self.bn(x)
        x = self.act(x)
        return x

    def reset_parameters(self):
        # Uniform init for the conv weight; zero init for the offset/mask branch,
        # so the module initially behaves like a regular convolution.
        n = self.in_channels
        for k in self.kernel_size:
            n *= k
        std = 1. / math.sqrt(n)
        self.weight.data.uniform_(-std, std)
        self.bias.data.zero_()
        self.conv_offset_mask.weight.data.zero_()
        self.conv_offset_mask.bias.data.zero_()
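A quick sanity check for the block above (a minimal sketch; the batch size, channel count and feature-map size are arbitrary):

if __name__ == "__main__":
    m = DCNv2(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1)
    x = torch.randn(2, 64, 32, 32)   # dummy feature map: N=2, C=64, H=W=32
    y = m(x)
    print(y.shape)                   # torch.Size([2, 64, 32, 32]); spatial size preserved by padding=1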
Building the Bottleneck_DCN module
class Bottleneck_DCN(nn.Module):
    # Standard bottleneck with DCN
    def __init__(self, c1, c2, shortcut=True, g=1, k=(3, 3), e=0.5):  # ch_in, ch_out, shortcut, groups, kernels, expand
        super().__init__()
        c_ = int(c2 * e)  # hidden channels
        if k[0] == 3:
            self.cv1 = DCNv2(c1, c_, k[0], 1)
        else:
            self.cv1 = Conv(c1, c_, k[0], 1)
        if k[1] == 3:
            self.cv2 = DCNv2(c_, c2, k[1], 1, groups=g)
        else:
            self.cv2 = Conv(c_, c2, k[1], 1, g=g)
        self.add = shortcut and c1 == c2

    def forward(self, x):
        return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))
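Note that only kernel size 3 is routed to DCNv2; any other kernel size falls back to the regular Conv block, which keeps the bottleneck compatible with variants whose first conv is 1×1 (e.g. the bottleneck inside YOLOv5's C3). A minimal shape check with made-up channel sizes:

blk = Bottleneck_DCN(128, 128, shortcut=True)
y = blk(torch.randn(1, 128, 40, 40))   # residual path is active because c1 == c2
print(y.shape)                         # torch.Size([1, 128, 40, 40])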
Defining the C2f_DCN module
class C2f_DCN(nn.Module):
    # CSP Bottleneck with 2 convolutions
    def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5):  # ch_in, ch_out, number, shortcut, groups, expansion
        super().__init__()
        self.c = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, 2 * self.c, 1, 1)
        self.cv2 = Conv((2 + n) * self.c, c2, 1)  # optional act=FReLU(c2)
        self.m = nn.ModuleList(Bottleneck_DCN(self.c, self.c, shortcut, g, k=(3, 3), e=1.0) for _ in range(n))

    def forward(self, x):
        y = list(self.cv1(x).split((self.c, self.c), 1))
        y.extend(m(y[-1]) for m in self.m)
        return self.cv2(torch.cat(y, 1))
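And the same kind of check for the assembled block (again with arbitrary sizes; it relies on the Ultralytics Conv import from the DCNv2 snippet):

c2f = C2f_DCN(c1=128, c2=128, n=2, shortcut=True)
y = c2f(torch.randn(1, 128, 40, 40))
print(y.shape)   # torch.Size([1, 128, 40, 40])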
Finally, swap the C2f_DCN module in for the corresponding module of your detector, for example the C3 block in YOLOv5 or the C2f block in YOLOv8, and you are done. Folks, time to get those papers rolling~
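For a concrete picture of what "replacing the module" means, here is a toy stage (purely illustrative, not the actual YOLOv8 definition) that mirrors the Conv + C2f pattern of a backbone stage, with C2f_DCN dropped in. In a real Ultralytics setup you would instead register C2f_DCN in tasks.py and reference it from the model YAML, so the wiring below is only a sketch:

stage = nn.Sequential(
    Conv(128, 256, 3, 2),                   # strided downsample, as in a YOLOv8 backbone stage
    C2f_DCN(256, 256, n=2, shortcut=True),  # takes the place of the original C2f at this stage
)
x = torch.randn(1, 128, 80, 80)
print(stage(x).shape)   # torch.Size([1, 256, 40, 40])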