nvidia-smi   # run in a terminal to check the GPU model, driver version, and memory usage
Training is indeed much faster on the GPU.
Method 1: call .cuda() on the network, the loss function, and each batch of data.
# network
tudui = Tudui()
tudui = tudui.cuda()
# loss function
loss_fn = nn.CrossEntropyLoss()
loss_fn = loss_fn.cuda()
# in the training loop
imgs, targets = data
imgs = imgs.cuda()
targets = targets.cuda()
# in the test loop
imgs, targets = data
imgs = imgs.cuda()
targets = targets.cuda()
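For context, a minimal sketch of how these .cuda() calls sit inside a training loop; train_data, the Tudui class, and the hyperparameters are assumed from earlier sections, so treat it as an outline rather than the exact script.

import torch
import torch.nn as nn
from torch.utils.data import DataLoader

train_loader = DataLoader(train_data, batch_size=64)   # train_data assumed from earlier

tudui = Tudui()
tudui = tudui.cuda()                                    # move the model's parameters to the GPU
loss_fn = nn.CrossEntropyLoss()
loss_fn = loss_fn.cuda()
optimizer = torch.optim.SGD(tudui.parameters(), lr=0.01)

for data in train_loader:
    imgs, targets = data
    imgs = imgs.cuda()                                  # inputs must live on the same device as the model
    targets = targets.cuda()
    outputs = tudui(imgs)
    loss = loss_fn(outputs, targets)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()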
Better to check first and only call .cuda() when a GPU is actually available; that way the same script runs on both CPU and GPU, preferring the GPU when it exists.
tudui = Tudui()
if torch.cuda.is_available():
tudui = tudui.cuda()
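The same availability check applies to the loss function and to the data batches, for example:

if torch.cuda.is_available():
    loss_fn = loss_fn.cuda()

imgs, targets = data
if torch.cuda.is_available():
    imgs = imgs.cuda()
    targets = targets.cuda()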
import time
start_time = time.time()               # wall-clock time before the part you want to measure
end_time = time.time()                 # wall-clock time after it
print(end_time - start_time)           # elapsed seconds
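One way to place these calls, as a sketch: report the elapsed time every 100 training steps. train_loader and the loop body come from the sketch above, and total_train_step is just an illustrative counter name.

import time

start_time = time.time()
total_train_step = 0
for data in train_loader:
    # ... forward pass, loss, backward pass, optimizer step ...
    total_train_step += 1
    if total_train_step % 100 == 0:
        end_time = time.time()
        print("steps: {}, elapsed: {:.2f}s".format(total_train_step, end_time - start_time))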
device = torch.device("cpu")
#device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
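On a machine with more than one GPU, the device string can also pick a specific card:

device = torch.device("cuda:0")        # first GPU; "cuda:1" would be the second, and so on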
tudui = Tudui()
tudui = tudui.to(device)
loss_fn = loss_fn.to(device)
# in the training and test loops
imgs = imgs.to(device)
targets = targets.to(device)
Actually the model and the loss function don't need the reassignment, because nn.Module.to(device) moves a module in place; the tensors in the training and test loops do need it, since Tensor.to(device) returns a new tensor.
tudui = Tudui()
tudui.to(device)                       # module: moved in place, no reassignment needed
loss_fn.to(device)
# in the training and test loops
imgs = imgs.to(device)                 # tensor: .to() returns a new tensor, so keep the result
targets = targets.to(device)
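A quick way to check that difference; the small Linear layer and the tensor here are purely illustrative.

import torch
import torch.nn as nn

if torch.cuda.is_available():
    device = torch.device("cuda")
    layer = nn.Linear(4, 2)
    layer.to(device)                              # module is moved in place
    print(next(layer.parameters()).device)        # cuda:0

    x = torch.ones(4)
    x.to(device)                                  # returns a moved copy that is discarded here
    print(x.device)                               # still cpu
    x = x.to(device)                              # keep the returned tensor
    print(x.device)                               # cuda:0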