import torch
from torch import nn

# Example inputs and targets for the element-wise losses
inputs = torch.tensor([1, 2, 3], dtype=torch.float32)
targets = torch.tensor([1, 2, 5], dtype=torch.float32)
inputs = torch.reshape(inputs, (1, 1, 1, 3))
targets = torch.reshape(targets, (1, 1, 1, 3))

# L1Loss (mean absolute error), default reduction='mean'
loss = nn.L1Loss()
result = loss(inputs, targets)
print(result)  # tensor(0.6667) = (|1-1| + |2-2| + |5-3|) / 3

# reduction='sum' adds the absolute differences up instead of averaging them
loss_sum = nn.L1Loss(reduction='sum')
result_sum = loss_sum(inputs, targets)
print(result_sum)  # tensor(2.)

# MSELoss (mean squared error)
loss_mse = nn.MSELoss()
result_mse = loss_mse(inputs, targets)
print(result_mse)  # tensor(1.3333) = (0 + 0 + 2**2) / 3
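# Quick sanity check (not in the original notes): reproduce both values by hand
print(torch.abs(inputs - targets).mean())   # 0.6667, same as L1Loss with reduction='mean'
print(((inputs - targets) ** 2).mean())     # 1.3333, same as MSELoss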
# CrossEntropyLoss expects raw scores (logits) of shape (N, C) and class indices of shape (N,)
x = torch.tensor([0.1, 0.2, 0.3])
y = torch.tensor([1])
x = torch.reshape(x, (1, 3))
loss_cross = nn.CrossEntropyLoss()
result_cross = loss_cross(x, y)
print(result_cross)  # tensor(1.1019)
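# Sanity check (a sketch, not part of the original notes): for a single sample,
# CrossEntropyLoss on raw logits equals -x[target] + log(sum_j exp(x[j])),
# i.e. log-softmax followed by negative log-likelihood
manual = -x[0, 1] + torch.log(torch.exp(x[0]).sum())
print(manual)  # same value as result_cross, about 1.1019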
import torchvision
from torch import nn
from torch.utils.data import DataLoader

# CIFAR10 test set, converted to tensors
dataset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True,
                                       transform=torchvision.transforms.ToTensor())
dataloader = DataLoader(dataset, batch_size=64, shuffle=True)
class Tudui(nn.Module):
    def __init__(self):
        super(Tudui, self).__init__()
        # CIFAR10 input is 3x32x32; three conv+maxpool stages reduce it to 64x4x4,
        # which flattens to 1024 features for the two linear layers
        self.model1 = nn.Sequential(
            nn.Conv2d(in_channels=3, out_channels=32, kernel_size=5, stride=1, padding=2),
            nn.MaxPool2d(kernel_size=2),
            nn.Conv2d(in_channels=32, out_channels=32, kernel_size=5, stride=1, padding=2),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 64, 5, padding=2),
            nn.MaxPool2d(2),
            nn.Flatten(),
            nn.Linear(1024, 64),
            nn.Linear(64, 10)
        )

    def forward(self, x):
        x = self.model1(x)
        return x
# First, run the batches through the network to see the raw outputs next to the targets
tudui = Tudui()
for data in dataloader:
    imgs, targets = data
    outputs = tudui(imgs)
    print(outputs)   # one row of 10 class scores per image
    print(targets)   # one class index per image
# Now attach CrossEntropyLoss to compare the outputs against the targets
tudui = Tudui()
loss = nn.CrossEntropyLoss()
for data in dataloader:
    imgs, targets = data
    outputs = tudui(imgs)
    result_loss = loss(outputs, targets)
    print(result_loss)
A loss function does two things for us:
1. It measures the gap between the network's actual output and the target.
2. It gives us a basis for updating the parameters: backpropagation from the loss produces the gradients (grad), as sketched below.
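As a minimal sketch of point 2 (assuming the tudui, loss, and dataloader objects defined above), calling backward() on the loss fills in the grad attribute of every parameter of the network; an optimizer then uses these gradients to update the weights:

# Backpropagate the loss from one batch and inspect a gradient
imgs, targets = next(iter(dataloader))
outputs = tudui(imgs)
result_loss = loss(outputs, targets)
result_loss.backward()               # fills param.grad for every parameter
first_conv = tudui.model1[0]         # the first Conv2d layer in the Sequential
print(first_conv.weight.grad.shape)  # torch.Size([32, 3, 5, 5])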