Li Mu's Deep Learning: Concise Implementation of the Multilayer Perceptron

Published: January 19, 2024
import torch
import numpy as np
import torch.nn as nn
from torch.nn import init
import torchvision.transforms as transforms
import torch.utils.data as Data
import sys

sys.path.append("路径")  # "路径" is a placeholder: the local directory that contains d2lzh_pytorch
import d2lzh_pytorch as d2l

num_inputs, num_outputs, num_hidden = 784, 10, 256  # 28*28 input pixels, 10 classes, 256 hidden units


# Flatten X to shape (batch_size, -1)
class FlattenLayer(nn.Module):
    def __init__(self):
        super(FlattenLayer, self).__init__()

    def forward(self, X):
        return X.view(X.shape[0], -1)
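
# A quick sanity check (added for illustration, not in the original post):
# FlattenLayer should turn a (batch, 1, 28, 28) Fashion-MNIST style batch
# into a (batch, 784) matrix.
_fake_batch = torch.randn(4, 1, 28, 28)
assert FlattenLayer()(_fake_batch).shape == (4, 784)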


# softmax operation
def softmax(X):
    X_exp = X.exp()
    partition = X_exp.sum(dim=1, keepdim=True)
    return X_exp / partition
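
# A small check (added for illustration): every row of the softmax output sums to 1.
# Note that X.exp() can overflow for large logits; a numerically stabler variant
# would subtract X.max(dim=1, keepdim=True).values from X before exponentiating.
_probs = softmax(torch.randn(2, 5))
print(_probs.sum(dim=1))  # ~tensor([1., 1.]) up to floating-point error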


# Evaluate classification accuracy on a data iterator
def evaluate_accuracy(data_iter):
    acc_num, num = 0.0, 0
    with torch.no_grad():  # no gradients are needed during evaluation
        for X, y in data_iter:
            # softmax is monotonic, so argmax over the raw logits gives the same
            # predictions; the explicit softmax is kept to match the original post
            acc_num += (softmax(net(X)).argmax(dim=1) == y).sum().item()
            num += y.shape[0]
    return acc_num / num


'''
------------------------------------------Define the model
'''
net = nn.Sequential(
    FlattenLayer(),
    nn.Linear(num_inputs, num_hidden),
    nn.ReLU(),
    nn.Linear(num_hidden, num_outputs)
)
for param in net.parameters():
    init.normal_(param, mean=0, std=0.01)  # initializes both the weights and the biases
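
# For reference (assuming a reasonably recent PyTorch), nn.Flatten() is a built-in
# equivalent of the hand-written FlattenLayer above. The loop below simply prints
# the parameter shapes created by the Sequential container (added for illustration).
for name, param in net.named_parameters():
    print(name, param.shape)  # e.g. '1.weight' -> torch.Size([256, 784])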
'''
-----------------------------------------Load the data and train the model
'''
batch_size = 256
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)
loss = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(net.parameters(), lr=0.5)
num_epochs = 100
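
# Note (added for illustration): nn.CrossEntropyLoss expects raw logits and applies
# log-softmax internally, so net(X) is passed to it directly without an explicit
# softmax. A quick equivalence check against a manual log-softmax + NLL loss:
_logits = torch.randn(3, num_outputs)
_labels = torch.tensor([0, 1, 2])
_manual = nn.functional.nll_loss(nn.functional.log_softmax(_logits, dim=1), _labels)
print(torch.allclose(loss(_logits, _labels), _manual))  # True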


def train():
    for epoch in range(num_epochs):
        train_acc, train_l, test_acc, n, num = 0.0, 0.0, 0.0, 0, 0
        for X, y in train_iter:
            y_hat = net(X)  # forward pass, reused for both the loss and the accuracy
            l = loss(y_hat, y)

            optimizer.zero_grad()
            l.backward()
            optimizer.step()
            n += y.shape[0]   # number of examples seen
            num += 1          # number of batches seen
            train_l += l.item()
            train_acc += (y_hat.argmax(dim=1) == y).sum().item()
        test_acc = evaluate_accuracy(test_iter)
        print('epoch %d, loss %.4f, train_acc %.3f, test_acc %.3f'
              % (epoch + 1, train_l / num, train_acc / n, test_acc))


train()
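
# After training, a quick look at the predictions on one test batch
# (added for illustration; class indices follow the Fashion-MNIST labels).
X_sample, y_sample = next(iter(test_iter))
with torch.no_grad():
    y_pred = net(X_sample).argmax(dim=1)
print('predicted:', y_pred[:10].tolist())
print('actual:   ', y_sample[:10].tolist())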

Source: https://blog.csdn.net/qq_43401942/article/details/135707227