I'll keep updating this post as I come across more useful snippets...
import torch

def gpu_is_available():
    # Print basic CUDA / GPU information
    print('\nGPU details:')
    print('  gpu_is_available      :', torch.cuda.is_available())
    print('  cuda_device_count     :', torch.cuda.device_count())
    print('  cuda_device_name      :', torch.cuda.get_device_name())
    print('  cuda_device_capability:', torch.cuda.get_device_capability(0))

gpu_is_available()
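If the check above passes, you can also query the device properties (name, total memory, number of multiprocessors). A minimal sketch, assuming a single CUDA device at index 0:

import torch

if torch.cuda.is_available():
    props = torch.cuda.get_device_properties(0)   # properties of device 0
    print('name                 :', props.name)
    print('total_memory (GiB)   :', props.total_memory / 1024**3)
    print('multi_processor_count:', props.multi_processor_count)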
# Test whether the PyTorch GPU build works
import torch

flag = torch.cuda.is_available()
print(flag)

ngpu = 1
# Decide which device we want to run on
device = torch.device("cuda:0" if (torch.cuda.is_available() and ngpu > 0) else "cpu")
print('CUDA device:', device)
print('GPU name:', torch.cuda.get_device_name(0))
print('PyTorch version:', torch.__version__)
print('CUDA version:', torch.version.cuda)
print('cuDNN version:', torch.backends.cudnn.version())
print('A random 3*3 tensor on the GPU:', torch.rand(3, 3).cuda())
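Printing version info only confirms the install; to check that the GPU actually does the computation, you can run a small matrix multiply on it and compare against the CPU. A minimal sketch (the 4096 matrix size is an arbitrary choice, not from the original post):

import time
import torch

if torch.cuda.is_available():
    x = torch.rand(4096, 4096)

    # CPU matmul
    t0 = time.time()
    _ = x @ x
    print('CPU matmul:', time.time() - t0, 's')

    # GPU matmul (synchronize so the timing covers the kernel, not just the launch)
    xg = x.cuda()
    torch.cuda.synchronize()
    t0 = time.time()
    _ = xg @ xg
    torch.cuda.synchronize()
    print('GPU matmul:', time.time() - t0, 's')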
Source: "如何测试pytorch-gpu版本和tensorflow-gpu版本是否安装成功"
import torch
# Use the GPU for training if it is available
if not torch.cuda.is_available():
    print('CUDA is not available. Training on CPU ...')
else:
    print('CUDA is available. Training on GPU ...')

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
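Once `device` is set, the usual pattern is to move both the model and each batch of data onto it with `.to(device)`. A minimal sketch, assuming a toy `nn.Linear` model and a random batch (both are placeholders, not part of the original snippet):

import torch
import torch.nn as nn

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

model = nn.Linear(10, 2).to(device)      # move the model's parameters to the device
inputs = torch.rand(32, 10).to(device)   # move a batch of data to the same device

outputs = model(inputs)
print(outputs.device)                    # prints cuda:0 when a GPU is available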
Source: "深入学习之anaconda、pytorch、cuda安装"
import torch

# Shape and number of tensors to allocate
# (50000 tensors of 100*1000 float32 values is roughly 20 GB; reduce num_tensors on smaller GPUs)
shape = (100, 1000)
num_tensors = 50000

device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
data = [torch.rand(shape, device=device) for _ in range(num_tensors)]

total_sum = torch.tensor([0.0])
for tensor in data:
    total_sum += tensor.sum().cpu()
print('Total sum:', total_sum.item())
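While a stress test like this runs, you can check how much memory PyTorch has actually allocated on the device. A small sketch using torch.cuda.memory_allocated / memory_reserved (the division by 1024**3 just converts bytes to GiB):

import torch

if torch.cuda.is_available():
    x = torch.rand(1000, 1000, device='cuda')   # allocate something on the GPU
    print('allocated:', torch.cuda.memory_allocated() / 1024**3, 'GiB')
    print('reserved :', torch.cuda.memory_reserved() / 1024**3, 'GiB')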
Source: "测试pytorch-gpu"
# Test whether the TensorFlow GPU build works
import tensorflow as tf
print('\n\nGPU devices:', tf.config.list_physical_devices('GPU'))
a = tf.constant(2.)
b = tf.constant(4.)
print('a * b =', a * b)
print('TensorFlow version:', tf.__version__)
Source: "如何测试pytorch-gpu版本和tensorflow-gpu版本是否安装成功"
import tensorflow as tf
# tf.test.is_gpu_available() is deprecated in TF 2.x; tf.config.list_physical_devices('GPU') is preferred
print(tf.test.is_gpu_available())