29/76-ResNet

Published: January 17, 2024

Residual Networks

The idea resembles merging two roads into one: a residual block combines two paths, the main convolutional path and a shortcut connection, by adding their outputs.
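In code, this means the block learns a residual f(x) and merges the two paths by element-wise addition before the final activation. A minimal sketch of just the merge step (residual_forward and f are illustrative names, not part of the implementation below):

import torch.nn.functional as F

def residual_forward(f, x):
    # main path f(x) plus the identity shortcut x, merged by element-wise addition
    return F.relu(f(x) + x)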

import torch
from torch import nn
import torch.nn.functional as F  # forward() below uses F.relu()
from d2l import torch as d2l
import os
import matplotlib.pyplot as plt
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"

class ResidualBlock(nn.Module):  # the M in Module must be capitalized; a commonly made mistake
    # The constructor takes at least two arguments: the input and output channel counts.
    # A key job of a residual block is to change the number of channels of the signal.
    def __init__(self, in_channels, num_channels, use_1x1conv=False, strides=1):  # use_1x1conv: whether to put a 1x1 convolution on the shortcut
        super(ResidualBlock, self).__init__()
        self.conv1 = nn.Conv2d(
            in_channels, num_channels, kernel_size=3, stride=strides, padding=1)  # the 3/1/1 pattern keeps height and width; stride=2 halves them
        self.conv2 = nn.Conv2d(
            num_channels, num_channels, kernel_size=3, padding=1)  # stride defaults to 1, so height and width are unchanged here
        if use_1x1conv:
            self.conv3 = nn.Conv2d(
                in_channels, num_channels, kernel_size=1, stride=strides)  # projection shortcut for the residual connection
        else:
            self.conv3 = None
        self.bn1 = nn.BatchNorm2d(num_channels)  # batch normalization
        self.bn2 = nn.BatchNorm2d(num_channels)
        self.relu = nn.ReLU(inplace=True)  # inplace=True saves memory (forward below calls F.relu directly)
    def forward(self, x):
        y = F.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        if self.conv3 is not None:
            x = self.conv3(x)
        y += x  # merge the two paths by addition
        return F.relu(y)  # this is how relu is called inside forward
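# The commented-out snippet below builds a list of two residual blocks by hand
# and shows the printed module structure you would get: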
'''
blk=[]
blk.append(ResidualBlock(64, 64,use_1x1conv=False, strides=1))
blk.append(ResidualBlock(64, 128,use_1x1conv=True, strides=2))
print(blk)

[ResidualBlock(
  (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (relu): ReLU(inplace=True)
), ResidualBlock(
  (conv1): Conv2d(64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))
  (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (conv3): Conv2d(64, 128, kernel_size=(1, 1), stride=(2, 2))
  (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (relu): ReLU(inplace=True)
)]
'''
# Case 1: input and output have the same shape
blk = ResidualBlock(3, 3)
X = torch.rand(4, 3, 6, 6)
print(blk(X).shape)  # torch.Size([4, 3, 6, 6])
# Case 2: increase the channel count while halving the height and width
blk = ResidualBlock(3, 6, use_1x1conv=True, strides=2)
Y = blk(X)
print(Y.shape)  # torch.Size([4, 6, 3, 3])
# Next: build the full ResNet-18 and print each stage's output shape


b1 = nn.Sequential(nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3),
                   nn.BatchNorm2d(64), nn.ReLU(),
                   nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
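# b1 is the ResNet stem: the 7x7 stride-2 convolution and the 3x3 stride-2
# max-pool shrink a 224x224 input to 56x56 before any residual stage.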


def resnet_block(input_channels, num_channels, num_residuals,
                 first_block=False):  # in/out channels and block count; first_block marks the stage right after the stem, which should not downsample again
    blk = []
    for i in range(num_residuals):
        if i == 0 and not first_block:
            # First block of a later stage: change the channel count and halve height/width
            blk.append(
                ResidualBlock(input_channels, num_channels,
                              use_1x1conv=True, strides=2))
        else:
            blk.append(ResidualBlock(num_channels, num_channels))
    return blk

b2 = nn.Sequential(*resnet_block(64,64,2,first_block=True))
b3 = nn.Sequential(*resnet_block(64,128,2))
b4 = nn.Sequential(*resnet_block(128,256,2))
b5 = nn.Sequential(*resnet_block(256,512,2))
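# Each later stage doubles the channel count and halves the height and width;
# b2 keeps 64 channels at 56x56 because the stem already downsampled.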


net = nn.Sequential(b1, b2, b3, b4, b5,
                    nn.AdaptiveAvgPool2d((1,1)),
                    nn.Flatten(), nn.Linear(512, 10))
X = torch.rand(size=(1, 1, 224, 224))
for layer in net:
    X = layer(X)
    print(layer.__class__.__name__, 'output shape:\t', X.shape)
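# With everything wired correctly, the loop above should print shapes like
# the following (standard ResNet-18 stage shapes for a 1x1x224x224 input):
# Sequential output shape:        torch.Size([1, 64, 56, 56])
# Sequential output shape:        torch.Size([1, 64, 56, 56])
# Sequential output shape:        torch.Size([1, 128, 28, 28])
# Sequential output shape:        torch.Size([1, 256, 14, 14])
# Sequential output shape:        torch.Size([1, 512, 7, 7])
# AdaptiveAvgPool2d output shape: torch.Size([1, 512, 1, 1])
# Flatten output shape:           torch.Size([1, 512])
# Linear output shape:            torch.Size([1, 10])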

lr, num_epochs, batch_size = 0.05, 10, 128
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size, resize=96)
d2l.train_ch6(net, train_iter, test_iter, num_epochs, lr, d2l.try_gpu())

plt.show()

# loss 0.015, train acc 0.995, test acc 0.920
# 2433.9 examples/sec on cuda:0. Somewhat overfitted: train accuracy is noticeably higher than test accuracy.
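As a quick sanity check, the model size can be inspected as below (a minimal sketch; the rough 11M figure assumes this exact single-channel, 10-class ResNet-18 variant):

num_params = sum(p.numel() for p in net.parameters())  # count all learnable parameters
print(f'{num_params / 1e6:.1f}M parameters')  # roughly 11M for this variant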


Source: https://blog.csdn.net/weixin_46323807/article/details/135601918