How do you add an attention module to the YOLOv5 network?

Three files need to be modified: common.py, yolo.py, and the model configuration file (.yaml).

1. Open common.py and paste in the attention module.

For example, the Convolutional Block Attention Module (CBAM).

CBAM code snippet:
import torch
import torch.nn as nn


class ChannelAttention(nn.Module):
    def __init__(self, in_planes, ratio=16):
        super(ChannelAttention, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.max_pool = nn.AdaptiveMaxPool2d(1)
        self.f1 = nn.Conv2d(in_planes, in_planes // ratio, 1, bias=False)
        self.relu = nn.ReLU()
        self.f2 = nn.Conv2d(in_planes // ratio, in_planes, 1, bias=False)
        # Variant: the shared MLP can also be written as a sequential container
        # self.sharedMLP = nn.Sequential(
        #     nn.Conv2d(in_planes, in_planes // ratio, 1, bias=False), nn.ReLU(),
        #     nn.Conv2d(in_planes // ratio, in_planes, 1, bias=False))
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # Shared MLP applied to both the global-average- and global-max-pooled features
        avg_out = self.f2(self.relu(self.f1(self.avg_pool(x))))
        max_out = self.f2(self.relu(self.f1(self.max_pool(x))))
        out = self.sigmoid(avg_out + max_out)
        return out


class SpatialAttention(nn.Module):
    def __init__(self, kernel_size=7):
        super(SpatialAttention, self).__init__()
        assert kernel_size in (3, 7), 'kernel size must be 3 or 7'
        padding = 3 if kernel_size == 7 else 1
        self.conv = nn.Conv2d(2, 1, kernel_size, padding=padding, bias=False)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # Channel-wise mean and max, concatenated into a 2-channel map
        avg_out = torch.mean(x, dim=1, keepdim=True)
        max_out, _ = torch.max(x, dim=1, keepdim=True)
        x = torch.cat([avg_out, max_out], dim=1)
        x = self.conv(x)
        return self.sigmoid(x)


class CBAM(nn.Module):
    # Convolutional Block Attention Module: channel attention followed by spatial attention
    def __init__(self, c1, c2, ratio=16, kernel_size=7):  # ch_in, ch_out, reduction ratio, spatial kernel size
        super(CBAM, self).__init__()
        # c2 is unused here (CBAM keeps the channel count) but is kept so
        # parse_model() can treat CBAM like the other channel-aware modules
        self.channel_attention = ChannelAttention(c1, ratio)
        self.spatial_attention = SpatialAttention(kernel_size)

    def forward(self, x):
        out = self.channel_attention(x) * x  # reweight channels
        out = self.spatial_attention(out) * out  # then reweight spatial locations
        return out
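
A quick shape check (a hypothetical usage snippet, not part of common.py; the tensor size is made up) confirms that CBAM leaves the feature-map shape unchanged, which is why it can be dropped between any two layers:

x = torch.randn(1, 64, 32, 32)  # example feature map: batch 1, 64 channels, 32x32
cbam = CBAM(64, 64)
print(cbam(x).shape)  # torch.Size([1, 64, 32, 32]) -- same shape in, same shape out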
2. Open yolo.py and register the attention module, as sketched below.
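
A minimal sketch of the change, assuming a recent YOLOv5 release: in parse_model() inside models/yolo.py, append CBAM to the list of modules whose first argument is the input channel count. The exact set of classes in this if statement varies between YOLOv5 versions, so treat the list below as illustrative:

# parse_model() in models/yolo.py: CBAM appended to the existing module list
# so its channel arguments (c1, c2) are resolved like Conv/C3 layers
if m in [Conv, GhostConv, Bottleneck, GhostBottleneck, SPP, SPPF, DWConv,
         MixConv2d, Focus, CrossConv, BottleneckCSP, C3, C3TR, CBAM]:
    c1, c2 = ch[f], args[0]
    if c2 != no:  # if not an output layer
        c2 = make_divisible(c2 * gw, 8)
    args = [c1, c2, *args[1:]]

Because yolo.py already does "from models.common import *", defining CBAM in common.py is enough to make the name visible here.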

3. Modify the model configuration .yaml file.

The module can be added in the backbone or the neck; choose the position according to your task. A sketch of one possible backbone placement follows.
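
As an illustration, here is a sketch of the yolov5s.yaml backbone with a single CBAM layer inserted before SPPF. The channel numbers follow yolov5s, but the placement is only an example; note that inserting a layer shifts every subsequent index by one, so the "from" fields in the head section must be renumbered accordingly:

# yolov5s.yaml backbone with one CBAM layer added (sketch)
backbone:
  # [from, number, module, args]
  [[-1, 1, Conv, [64, 6, 2, 2]],   # 0-P1/2
   [-1, 1, Conv, [128, 3, 2]],     # 1-P2/4
   [-1, 3, C3, [128]],
   [-1, 1, Conv, [256, 3, 2]],     # 3-P3/8
   [-1, 6, C3, [256]],
   [-1, 1, Conv, [512, 3, 2]],     # 5-P4/16
   [-1, 9, C3, [512]],
   [-1, 1, Conv, [1024, 3, 2]],    # 7-P5/32
   [-1, 3, C3, [1024]],
   [-1, 1, CBAM, [1024]],          # 9: CBAM attention (newly inserted)
   [-1, 1, SPPF, [1024, 5]],       # 10
  ]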
