torch提供了很多损失函数,可查看官方文档Loss Functions部分
损失函数用法差不多,这里以L1Loss和MSELoss为例
import torch
from torch.nn import L1Loss
from torch.nn import MSELoss

# Toy prediction/target pair; the only mismatch is in the last element (3 vs 5).
inputs = torch.tensor([1, 2, 3], dtype=torch.float32)
outputs = torch.tensor([1, 2, 5], dtype=torch.float32)

# Reshape to (N, C, H, W) = (1, 1, 1, 3) to mimic a batched image tensor.
inputs = torch.reshape(inputs, (1, 1, 1, 3))
outputs = torch.reshape(outputs, (1, 1, 1, 3))

# Mean absolute error: (|0| + |0| + |2|) / 3 = 0.6667
l1 = L1Loss()
result = l1(inputs, outputs)
print(result)

# Mean squared error: (0 + 0 + 4) / 3 = 1.3333
mse = MSELoss()
result_mse = mse(inputs, outputs)
print(result_mse)
from torch import nn
import torch
from torch.utils.tensorboard import SummaryWriter
import torchvision
from torch.utils.data import DataLoader
# CIFAR-10 test split; ToTensor converts PIL images to float tensors in [0, 1].
# NOTE(review): download=True is not set — this assumes ./dataset already holds
# the CIFAR-10 files, otherwise construction raises. Confirm before running.
dataset = torchvision.datasets.CIFAR10('./dataset', train=False, transform=torchvision.transforms.ToTensor())
# batch_size=1: the loop below computes the loss one image at a time.
dataloader = DataLoader(dataset=dataset, batch_size=1)
class Test(nn.Module):
    """CIFAR-10 classifier: three conv+maxpool stages, then two linear layers.

    Input:  (N, 3, 32, 32) image batch.
    Output: (N, 10) raw class logits (no softmax; pair with CrossEntropyLoss).
    """

    def __init__(self):
        super(Test, self).__init__()
        # Each conv (kernel 5, padding 2) preserves the spatial size; each
        # MaxPool2d halves it: 32 -> 16 -> 8 -> 4.
        self.model1 = nn.Sequential(
            nn.Conv2d(in_channels=3, out_channels=32, kernel_size=5, padding=2),
            nn.MaxPool2d(kernel_size=2),
            nn.Conv2d(in_channels=32, out_channels=32, kernel_size=5, padding=2),
            nn.MaxPool2d(kernel_size=2),
            nn.Conv2d(in_channels=32, out_channels=64, kernel_size=5, padding=2),
            nn.MaxPool2d(kernel_size=2),
            nn.Flatten(),  # 64 channels * 4 * 4 = 1024 features
            nn.Linear(1024, 64),
            nn.Linear(64, 10),
        )

    def forward(self, x):
        # Sequential runs all layers in order — no need to call them one by one.
        x = self.model1(x)
        return x
net = Test()
# CrossEntropyLoss expects raw logits and integer class-index targets.
loss = nn.CrossEntropyLoss()
for data in dataloader:
    imgs, targets = data
    output = net(imgs)
    # Fixed typo: variable was misspelled `resulr_loss`.
    result_loss = loss(output, targets)
    print(result_loss)
加上反向传播后:
from torch import nn
import torch
from torch.utils.tensorboard import SummaryWriter
import torchvision
from torch.utils.data import DataLoader
# CIFAR-10 test split; ToTensor converts PIL images to float tensors in [0, 1].
# NOTE(review): download=True is not set — this assumes ./dataset already holds
# the CIFAR-10 files, otherwise construction raises. Confirm before running.
dataset = torchvision.datasets.CIFAR10('./dataset', train=False, transform=torchvision.transforms.ToTensor())
# batch_size=1: the loop below computes the loss one image at a time.
dataloader = DataLoader(dataset=dataset, batch_size=1)
class Test(nn.Module):
    """CIFAR-10 classifier: three conv+maxpool stages, then two linear layers.

    Input:  (N, 3, 32, 32) image batch.
    Output: (N, 10) raw class logits (no softmax; pair with CrossEntropyLoss).
    """

    def __init__(self):
        super(Test, self).__init__()
        # Each conv (kernel 5, padding 2) preserves the spatial size; each
        # MaxPool2d halves it: 32 -> 16 -> 8 -> 4.
        self.model1 = nn.Sequential(
            nn.Conv2d(in_channels=3, out_channels=32, kernel_size=5, padding=2),
            nn.MaxPool2d(kernel_size=2),
            nn.Conv2d(in_channels=32, out_channels=32, kernel_size=5, padding=2),
            nn.MaxPool2d(kernel_size=2),
            nn.Conv2d(in_channels=32, out_channels=64, kernel_size=5, padding=2),
            nn.MaxPool2d(kernel_size=2),
            nn.Flatten(),  # 64 channels * 4 * 4 = 1024 features
            nn.Linear(1024, 64),
            nn.Linear(64, 10),
        )

    def forward(self, x):
        # Sequential runs all layers in order — no need to call them one by one.
        x = self.model1(x)
        return x
# With nn.Sequential the layers no longer have to be called one by one —
# the network definition above is complete.
net = Test()
loss = nn.CrossEntropyLoss()
for data in dataloader:
    imgs, targets = data
    output = net(imgs)
    result_loss = loss(output, targets)
    # Call backward() on the loss *value* (result_loss), not on the loss
    # module itself — this populates .grad on the network parameters.
    result_loss.backward()
    print('ok')
backward行打断点,进入调试界面可以查看网络内部的参数
weight里面有grad
运行到backward之前,grad里是None
运行完之后,计算出梯度
后面可以使用优化器,利用计算出来的梯度,对神经网络进行更新