Windows 10
GPU RTX 3090 + CUDA 11.1 + cudnn 8.9.6
Python 3.9
Torch 1.9.1 + cu111
Original code used: https://github.com/yanx27/Pointnet_Pointnet2_pytorch
Input data: LAS point clouds that have already been classified (the Classification attribute is populated)
Shared for anyone who needs it; please go easy on the code quality.
The original code has been simplified and commented.
Segmentation results can be saved as txt, or written back to a point cloud with laspy.
Don't ask why everything is on the C: drive; the answer is a 2 TB Samsung 980 Pro.
Full code: https://download.csdn.net/download/xinjiang666/88755213?spm=1001.2014.3001.5501
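Before training, it is worth a quick check that the input LAS files really carry usable class codes. A minimal sketch, assuming a hypothetical file path under the dataset folder:
# Minimal sanity check (hypothetical path) that the LAS file has a populated
# Classification attribute and readable header scales/offsets.
import numpy as np
import laspy

las = laspy.read('dataset/lasDatasetClassification/sample.las')  # hypothetical file
codes, counts = np.unique(np.asarray(las.classification), return_counts=True)
print('scales :', las.header.scales)
print('offsets:', las.header.offsets)
print('class codes -> point counts:', dict(zip(codes.tolist(), counts.tolist())))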
# 6-channel features: block-relative coordinates + globally normalized coordinates (this works well)
# ### Classification categories
classNumber = 2  # 0 = unclassified; 1 = road surface
# For training
class lasDataset(Dataset):
def __init__(self, split='train', data_root='dataset', train_ratio=0.6,val_ratio=0.2,test_ratio=0.2, num_point=1024, block_size=1.0, sample_rate=1.0, transform=None):
# Local coordinates XYZ (m) + RGB
points = np.transpose(np.array([las.X*lasHeader.scales[0],las.Y*lasHeader.scales[1],las.Z*lasHeader.scales[2],
las.red,las.green,las.blue]))
self.las_points.append(points)
coordMIN, coordMAX = np.amin(points, axis=0)[:3], np.amax(points, axis=0)[:3]
self.las_coord_MIN.append(coordMIN), self.las_coord_MAX.append(coordMAX)
# label
labels = np.transpose(np.array([las.classification]))
labels[labels == 11] = 1
self.las_labels.append(labels)
num_point_all.append(labels.size)
# Histogram of the labels
tmp, _ = np.histogram(labels, range(classNumber + 1))
labelweights += tmp
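# (Sketch following the reference repo's S3DIS loader; this step is not shown in the
# excerpt above.) After the per-file loop, the accumulated histogram becomes
# per-class loss weights, so rare classes contribute more to the loss.
labelweights = labelweights.astype(np.float32)
labelweights = labelweights / np.sum(labelweights)
self.labelweights = np.power(np.amax(labelweights) / labelweights, 1 / 3.0)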
def __getitem__(self, idx):
las_idx = self.las_idxs[idx]
points = self.las_points[las_idx] # N * 6
labels = self.las_labels[las_idx] # N
N_points = points.shape[0]
# # normalize: old 9-channel features
# selected_points = points[selected_point_idxs, :] # num_point * 6
# current_points = np.zeros((self.num_point, 9)) # num_point * 9
# current_points[:, 6] = selected_points[:, 0] / self.las_coord_MAX[las_idx][0]
# current_points[:, 7] = selected_points[:, 1] / self.las_coord_MAX[las_idx][1]
# current_points[:, 8] = selected_points[:, 2] / self.las_coord_MAX[las_idx][2]
# selected_points[:, 0] = selected_points[:, 0] - center[0] # x relative to the block center
# selected_points[:, 1] = selected_points[:, 1] - center[1] # y relative to the block center
# selected_points[:, 3:6] /= 255.0
# current_points[:, 0:6] = selected_points
# region ### normalize: 6-channel features (block-relative coordinates + globally normalized coordinates, no color)
selected_points = points[selected_point_idxs, 0:3] # num_point * 3
block_points = points[selected_point_idxs, 0:3]
block_points[:, 0] = selected_points[:, 0] - center[0] # x relative to the block center
block_points[:, 1] = selected_points[:, 1] - center[1] # y relative to the block center
block_points[:, 2] = selected_points[:, 2]
current_points = np.zeros((self.num_point, 6)) # num_point * 6
current_points[:, 0:3] = block_points
current_points[:, 3] = (selected_points[:, 0]-self.las_coord_MIN[las_idx][0]) / (self.las_coord_MAX[las_idx][0]-self.las_coord_MIN[las_idx][0])
current_points[:, 4] = (selected_points[:, 1]-self.las_coord_MIN[las_idx][1]) / (self.las_coord_MAX[las_idx][1]-self.las_coord_MIN[las_idx][1])
current_points[:, 5] = (selected_points[:, 2]-self.las_coord_MIN[las_idx][2]) / (self.las_coord_MAX[las_idx][2]-self.las_coord_MIN[las_idx][2])
# endregion
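The 6-channel layout above, pulled out as a standalone sketch (the helper name and the synthetic points are purely illustrative): channels 0-2 are coordinates relative to the sampled block center, channels 3-5 are coordinates normalized to [0, 1] over the whole file.
import numpy as np

def build_6ch_features(xyz, center, coord_min, coord_max):
    """Illustrative helper: block-relative XYZ plus globally normalized XYZ."""
    feat = np.zeros((xyz.shape[0], 6), dtype=np.float32)
    feat[:, 0] = xyz[:, 0] - center[0]   # x relative to the block center
    feat[:, 1] = xyz[:, 1] - center[1]   # y relative to the block center
    feat[:, 2] = xyz[:, 2]               # z kept as-is, matching the code above
    feat[:, 3:6] = (xyz - coord_min) / (coord_max - coord_min)  # global [0, 1] coordinates
    return feat

# quick check with synthetic points
pts = np.random.rand(1024, 3) * 10.0
print(build_6ch_features(pts, pts.mean(axis=0), pts.min(axis=0), pts.max(axis=0)).shape)  # (1024, 6)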
# For testing / prediction
class testDatasetToPred():
# prepare to give prediction on each points
def __init__(self, data_root, block_points=1024, split='test', stride=0.5, block_size=1.0, padding=0.001):
for file in self.file_list:
# Absolute path of the LAS file
pathLAS = os.path.join(data_root, file)
# Read the file: ndarray of shape (num_points, 7) with xyz, rgb, label
las = laspy.read(pathLAS)
# Header info: offsets and scales
lasHeader = las.header
self.las_offset.append(lasHeader.offsets), self.las_scales.append(lasHeader.scales)
# Local coordinates XYZ (m), RGB, ground-truth label
data = np.transpose(np.array([las.X * lasHeader.scales[0], las.Y * lasHeader.scales[1], las.Z * lasHeader.scales[2],
las.red,las.green,las.blue, las.classification])) # ndarray: (num_points, 7)
# Local coordinates XYZ (m)
points = data[:, :3]
coordMIN, coordMAX = np.amin(points, axis=0)[:3], np.amax(points, axis=0)[:3]
self.las_coord_MIN.append(coordMIN), self.las_coord_MAX.append(coordMAX)
self.scene_points_list.append(data[:, :6])
# Ground-truth labels
labels = data[:, 6]
labels[labels == 11] = 1
self.semantic_labels_list.append(labels)
def __getitem__(self, index):
for index_x in range(0, grid_x):
# region ### 6-channel features: block-relative coordinates + globally normalized coordinates (no color)
data_batch[:, 0] = data_batch[:, 0] - (s_x + self.block_size / 2.0)
data_batch[:, 1] = data_batch[:, 1] - (s_y + self.block_size / 2.0)
normlized_xyz = np.zeros((point_size, 3))
temp = points[point_idxs, :]
normlized_xyz[:, 0] = (temp[:, 0]-coordMIN[0]) / (coordMAX[0]-coordMIN[0])
normlized_xyz[:, 1] = (temp[:, 1]-coordMIN[1]) / (coordMAX[1]-coordMIN[1])
normlized_xyz[:, 2] = (temp[:, 2]-coordMIN[2]) / (coordMAX[2]-coordMIN[2])
### Assemble the 6-channel features
data_batch = np.concatenate((data_batch[:,0:3], normlized_xyz), axis=1)
#endregion
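For context, the block grid that this __getitem__ slides over is derived from the scene extent, block_size and stride; a condensed sketch of that computation, mirroring the reference repo's whole-scene loader (names are illustrative):
import numpy as np

def block_grid(coord_min, coord_max, block_size=1.0, stride=0.5):
    """Illustrative: number of sliding blocks along x/y and their start corners."""
    grid_x = int(np.ceil((coord_max[0] - coord_min[0] - block_size) / stride)) + 1
    grid_y = int(np.ceil((coord_max[1] - coord_min[1] - block_size) / stride)) + 1
    starts = [(coord_min[0] + ix * stride, coord_min[1] + iy * stride)
              for ix in range(grid_x) for iy in range(grid_y)]
    return grid_x, grid_y, starts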
pointnet_sem_seg.py
class get_model(nn.Module):
def __init__(self, num_class):
super(get_model, self).__init__()
self.k = num_class
self.feat = PointNetEncoder(global_feat=False, feature_transform=True, channel=6) ### 6-channel features
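With channel=6 the network expects input tensors of shape (batch, 6, num_point). A quick shape check, assuming the full pointnet_sem_seg.py from the reference repo is available (batch size and point count are arbitrary):
import torch
model = get_model(num_class=2)
points = torch.rand(8, 6, 1024)        # (batch, channels, points): the 6-channel features
seg_pred, trans_feat = model(points)   # per-point log-probabilities and the feature-transform matrix
print(seg_pred.shape)                  # expected: torch.Size([8, 1024, 2])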
pointnet2_sem_seg.py
class get_model(nn.Module):
def __init__(self, num_classes):
super(get_model, self).__init__()
self.sa1 = PointNetSetAbstraction(1024, 0.1, 32, 6 + 3, [32, 32, 64], False) ### 6 is the number of input feature channels
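The in_channel of the first set-abstraction layer is 6 + 3 because the layer concatenates the per-point features (6 channels) with the 3D coordinates before the shared MLP. A quick shape check, assuming the full pointnet2_sem_seg.py from the reference repo:
import torch
model = get_model(num_classes=2)
points = torch.rand(8, 6, 1024)   # first 3 channels double as xyz for sampling and grouping
seg_pred, _ = model(points)
print(seg_pred.shape)             # expected: torch.Size([8, 1024, 2])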
# Reference
# https://github.com/yanx27/Pointnet_Pointnet2_pytorch
# First run in a terminal: python -m visdom.server
# Then run this file
# True = PointNet++
PN2bool = True
# PN2bool = False
# Path of the current file
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
# Output path for the trained PointNet model
dirModel1 = ROOT_DIR + '/trainModel/pointnet_model'
if not os.path.exists(dirModel1):
os.makedirs(dirModel1)
# Output path for the trained PointNet++ model
dirModel2 = ROOT_DIR + '/trainModel/PointNet2_model'
if not os.path.exists(dirModel2):
os.makedirs(dirModel2)
# Log file path
pathLog = os.path.join(ROOT_DIR, 'LOG_train.txt')
# Training dataset path
pathDataset = os.path.join(ROOT_DIR, 'dataset/lasDatasetClassification/')
# Class names for point cloud semantic segmentation: only 2 classes here
classNumber = 2
classes = ['un', 'rs']
class2label = {cls: i for i, cls in enumerate(classes)}
seg_classes = class2label
seg_label_to_cat = {}
for i, cat in enumerate(seg_classes.keys()):
seg_label_to_cat[i] = cat
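A minimal sketch of how these pieces are usually wired together for training; the get_loss criterion and the general pattern follow the reference repo, while the hyperparameter values here are assumptions:
import torch
from torch.utils.data import DataLoader

TRAIN_DATASET = lasDataset(split='train', data_root=pathDataset, num_point=1024, block_size=1.0)
trainLoader = DataLoader(TRAIN_DATASET, batch_size=16, shuffle=True, drop_last=True)
weights = torch.Tensor(TRAIN_DATASET.labelweights).cuda()  # per-class weights from the label histogram
classifier = get_model(classNumber).cuda()                 # PointNet or PointNet++ depending on PN2bool
criterion = get_loss().cuda()                              # weighted NLL-style loss in the reference repo
optimizer = torch.optim.Adam(classifier.parameters(), lr=0.001)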
# Reference
# https://github.com/yanx27/Pointnet_Pointnet2_pytorch
# True = PointNet++
PN2bool = True
# PN2bool = False
# save to LAS
import laspy
def SaveResultLAS(newLasPath, las_offsets, las_scales,point_np, rgb_np, label1, label2):
# data
newx = point_np[:, 0]+las_offsets[0]
newy = point_np[:, 1]+las_offsets[1]
newz = point_np[:, 2]+las_offsets[2]
newred = rgb_np[:, 0]
newgreen = rgb_np[:, 1]
newblue = rgb_np[:, 2]
newclassification = label1
newuserdata = label2
minx = min(newx)
miny = min(newy)
minz = min(newz)
# create a new header
newheader = laspy.LasHeader(point_format=3, version="1.2")
newheader.scales = np.array([0.0001, 0.0001, 0.0001])
newheader.offsets = np.array([minx, miny, minz])
newheader.add_extra_dim(laspy.ExtraBytesParams(name="Classification", type=np.uint8))
newheader.add_extra_dim(laspy.ExtraBytesParams(name="UserData", type=np.uint8))
# create a Las
newlas = laspy.LasData(newheader)
newlas.x = newx
newlas.y = newy
newlas.z = newz
newlas.red = newred
newlas.green = newgreen
newlas.blue = newblue
newlas.Classification = newclassification
newlas.UserData = newuserdata
# write
newlas.write(newLasPath)
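A usage sketch with synthetic arrays; in the real prediction script point_np, rgb_np and the two label arrays come from the evaluation loop (for example the true class and the predicted class):
import numpy as np
n = 1000
point_np = np.random.rand(n, 3) * 100.0
rgb_np = np.random.randint(0, 65535, size=(n, 3))
label_true = np.random.randint(0, 2, size=n).astype(np.uint8)
label_pred = np.random.randint(0, 2, size=n).astype(np.uint8)
SaveResultLAS('result_pred.las', np.zeros(3), np.array([0.0001] * 3),
              point_np, rgb_np, label_true, label_pred)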
# Path of the current file
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
# Path of the trained model
pathTrainModel = os.path.join(ROOT_DIR, 'trainModel/pointnet_model')
if PN2bool:
pathTrainModel = os.path.join(ROOT_DIR, 'trainModel/PointNet2_model')
# Output path for the prediction results
visual_dir = ROOT_DIR + '/testResultPN/'
if PN2bool:
visual_dir = ROOT_DIR + '/testResultPN2/'
visual_dir = Path(visual_dir)
visual_dir.mkdir(exist_ok=True)
# Log file path
pathLog = os.path.join(ROOT_DIR, 'LOG_test_eval.txt')
# Test dataset path
pathDataset = os.path.join(ROOT_DIR, 'dataset/lasDatasetClassification2/')
# Class names for point cloud semantic segmentation: only 2 classes here
classNumber = 2
classes = ['un', 'rs']
class2label = {cls: i for i, cls in enumerate(classes)}
seg_classes = class2label
seg_label_to_cat = {}
for i, cat in enumerate(seg_classes.keys()):
seg_label_to_cat[i] = cat
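The evaluation script in the reference repo accumulates per-class correct, seen and union counts over all blocks and reports IoU per class; a condensed sketch of that metric (function name is illustrative):
import numpy as np

def per_class_iou(pred, target, num_classes=classNumber):
    """Illustrative IoU per class for the evaluation log."""
    ious = []
    for c in range(num_classes):
        inter = np.sum((pred == c) & (target == c))
        union = np.sum((pred == c) | (target == c))
        ious.append(inter / union if union > 0 else float('nan'))
    return ious  # [IoU of 'un', IoU of 'rs']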