Image segmentation is one of the core tasks in computer vision, and evaluating the segmentation results is a key step in validating a model's performance. This post walks through a simple batch segmentation-evaluation script written in Python with OpenCV.
Suppose we have a set of GT (Ground Truth) images and the corresponding segmentation masks produced by a KMeans-based algorithm. We want to evaluate, in batch, how similar each predicted mask is to its GT image so that we can quantify the model's performance. First, install the dependencies:
pip install numpy opencv-python tqdm hausdorff
First, we define a set of evaluation metrics: the Dice coefficient, IoU (Intersection over Union), sensitivity, PPV (Positive Predictive Value), and HD95 (the 95th percentile of the Hausdorff distance).
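For reference, these are the standard definitions on binary masks, where P is the predicted foreground, G the ground-truth foreground, and TP/FP/FN the pixel-wise true positives, false positives and false negatives:

$$\mathrm{Dice} = \frac{2\,|P \cap G|}{|P| + |G|}, \qquad \mathrm{IoU} = \frac{|P \cap G|}{|P \cup G|}, \qquad \mathrm{Sensitivity} = \frac{TP}{TP + FN}, \qquad \mathrm{PPV} = \frac{TP}{TP + FP}$$

HD95 is the 95th percentile of the distances from the points of one mask to the nearest point of the other (taken in both directions), which makes it less sensitive to single outlier pixels than the plain Hausdorff distance.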
def dice_coef(predict: np.ndarray, label: np.ndarray, epsilon: float = 1e-5) -> float:
    predict, label = transform_image_data(predict, label)
    intersection = (predict * label).sum()
    return (2. * intersection + epsilon) / (predict.sum() + label.sum() + epsilon)

def iou_score(predict: np.ndarray, label: np.ndarray, epsilon: float = 1e-5) -> float:
    predict, label = transform_image_data(predict, label)
    intersection = (predict & label).sum()
    union = (predict | label).sum()
    return (intersection + epsilon) / (union + epsilon)

def sensitivity(predict: np.ndarray, label: np.ndarray, epsilon: float = 1e-5) -> float:
    predict, label = transform_image_data(predict, label)
    intersection = (predict * label).sum()
    return (intersection + epsilon) / (label.sum() + epsilon)

def ppv(predict: np.ndarray, label: np.ndarray, epsilon: float = 1e-5) -> float:
    predict, label = transform_image_data(predict, label)
    intersection = (predict * label).sum()
    return (intersection + epsilon) / (predict.sum() + epsilon)

def hd95(predict: np.ndarray, label: np.ndarray, distance: str = "euclidean") -> float:
    predict, label = transform_image_data(predict, label)
    # Compare the coordinates of the foreground pixels, not the flattened 0/1 values,
    # otherwise the Hausdorff distance is meaningless.
    predict_points = np.argwhere(predict).astype(np.float64)
    label_points = np.argwhere(label).astype(np.float64)
    if predict_points.size == 0 or label_points.size == 0:
        return float("inf")  # undefined when one mask has no foreground
    hd = hausdorff.hausdorff_distance(predict_points, label_points, distance=distance)
    # Scaling the maximum Hausdorff distance by 0.95 is only a rough stand-in for the
    # true 95th-percentile HD95 (see the scipy-based sketch that follows).
    return hd * 0.95
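Note that the hausdorff package only returns the maximum Hausdorff distance, so the function above can only approximate HD95 by scaling that maximum. If an exact 95th percentile is needed, one option is to build the pairwise distance matrix with scipy.spatial.distance.cdist and take the percentile of the directed nearest-neighbour distances. This is a minimal sketch, assuming scipy is available and that all foreground pixel coordinates (rather than only boundary pixels) are compared:

import numpy as np
from scipy.spatial.distance import cdist  # assumes scipy is installed

def hd95_exact(predict: np.ndarray, label: np.ndarray) -> float:
    # Exact 95th-percentile Hausdorff distance between two binary masks.
    # Uses all foreground pixel coordinates, so it can be memory-heavy for large masks.
    p_points = np.argwhere(predict.astype(bool)).astype(np.float64)
    l_points = np.argwhere(label.astype(bool)).astype(np.float64)
    if p_points.size == 0 or l_points.size == 0:
        return float("inf")  # undefined when one mask has no foreground
    dists = cdist(p_points, l_points)  # pairwise Euclidean distances
    d_pl = dists.min(axis=1)           # each predicted point -> nearest GT point
    d_lp = dists.min(axis=0)           # each GT point -> nearest predicted point
    # One common convention: the larger of the two directed 95th percentiles.
    return max(np.percentile(d_pl, 95), np.percentile(d_lp, 95))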
Next, we write a function that takes the path of a folder containing the GT and segmentation images and returns the evaluation metrics for every image pair.
def batch_evaluation(data_folder, extension='.png'):
    # Sort both lists so that gt_* and mask_* files are paired by filename order.
    gt_files = sorted(glob.glob(data_folder + '/gt*' + extension))
    mask_files = sorted(glob.glob(data_folder + '/mask*' + extension))
    assert len(gt_files) == len(mask_files), "GT and mask file counts do not match"
    dice_scores = []
    iou_scores = []
    sensitivity_scores = []
    ppv_scores = []
    hd95_distances = []
    for gt_file, mask_file in tqdm(zip(gt_files, mask_files), total=len(gt_files)):
        # Read both images as single-channel grayscale masks.
        gt_image = cv2.imread(gt_file, cv2.IMREAD_GRAYSCALE)
        mask_image = cv2.imread(mask_file, cv2.IMREAD_GRAYSCALE)
        dice_scores.append(dice_coef(mask_image, gt_image))
        iou_scores.append(iou_score(mask_image, gt_image))
        sensitivity_scores.append(sensitivity(mask_image, gt_image))
        ppv_scores.append(ppv(mask_image, gt_image))
        hd95_distances.append(hd95(mask_image, gt_image))
    return {
        'dice_scores': dice_scores,
        'iou_scores': iou_scores,
        'sensitivity_scores': sensitivity_scores,
        'ppv_scores': ppv_scores,
        'hd95_distances': hd95_distances
    }
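If you want to sanity-check the function before pointing it at real data, a throwaway test pair can be written to disk first. This is a minimal sketch; the folder name and file names are made up for illustration, and it assumes the metric functions and batch_evaluation above are already defined in the same session:

import os
import cv2
import numpy as np

data_folder = './eval_demo'  # hypothetical scratch folder for a quick smoke test
os.makedirs(data_folder, exist_ok=True)

# A 64x64 ground-truth disk and a slightly shifted "prediction" of it.
gt = np.zeros((64, 64), dtype=np.uint8)
cv2.circle(gt, (32, 32), 15, 255, -1)
pred = np.roll(gt, 3, axis=1)  # shift by 3 pixels to simulate an imperfect mask

cv2.imwrite(os.path.join(data_folder, 'gt_000.png'), gt)
cv2.imwrite(os.path.join(data_folder, 'mask_000.png'), pred)

results = batch_evaluation(data_folder)
print(results['dice_scores'])  # a single Dice value close to, but below, 1.0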
Finally, we run the function on all images in the specified folder and print the results.
# Specify the folder that contains the GT and mask images
data_folder = '/path/to/your/data/folder'
# Run the batch evaluation
evaluation_results = batch_evaluation(data_folder)
# Print the per-image results
print("Dice Scores:", evaluation_results['dice_scores'])
print("IOU Scores:", evaluation_results['iou_scores'])
print("Sensitivity Scores:", evaluation_results['sensitivity_scores'])
print("PPV Scores:", evaluation_results['ppv_scores'])
print("HD95 Distances:", evaluation_results['hd95_distances'])
The complete script is shown below.
import glob

import cv2
import hausdorff
import numpy as np
from tqdm import tqdm

def transform_image_data(predict: np.ndarray, label: np.ndarray):
    # Binarize both images: any non-zero pixel counts as foreground (1), the rest as background (0).
    predict = predict.astype(np.bool_).astype(np.int_)
    label = label.astype(np.bool_).astype(np.int_)
    return predict, label
def dice_coef(predict: np.ndarray, label: np.ndarray, epsilon: float = 1e-5) -> float:
    predict, label = transform_image_data(predict, label)
    intersection = (predict * label).sum()
    return (2. * intersection + epsilon) / (predict.sum() + label.sum() + epsilon)

def iou_score(predict: np.ndarray, label: np.ndarray, epsilon: float = 1e-5) -> float:
    predict, label = transform_image_data(predict, label)
    intersection = (predict & label).sum()
    union = (predict | label).sum()
    return (intersection + epsilon) / (union + epsilon)

def sensitivity(predict: np.ndarray, label: np.ndarray, epsilon: float = 1e-5) -> float:
    predict, label = transform_image_data(predict, label)
    intersection = (predict * label).sum()
    return (intersection + epsilon) / (label.sum() + epsilon)

def ppv(predict: np.ndarray, label: np.ndarray, epsilon: float = 1e-5) -> float:
    predict, label = transform_image_data(predict, label)
    intersection = (predict * label).sum()
    return (intersection + epsilon) / (predict.sum() + epsilon)

def hd95(predict: np.ndarray, label: np.ndarray, distance: str = "euclidean") -> float:
    predict, label = transform_image_data(predict, label)
    # Compare foreground pixel coordinates, not flattened 0/1 values.
    predict_points = np.argwhere(predict).astype(np.float64)
    label_points = np.argwhere(label).astype(np.float64)
    if predict_points.size == 0 or label_points.size == 0:
        return float("inf")  # undefined when one mask has no foreground
    hd = hausdorff.hausdorff_distance(predict_points, label_points, distance=distance)
    # Scaling the maximum Hausdorff distance by 0.95 is only a rough approximation
    # of the true 95th-percentile HD95.
    return hd * 0.95
def batch_evaluation(data_folder, extension='.png'):
    # Sort both lists so that gt_* and mask_* files are paired by filename order.
    gt_files = sorted(glob.glob(data_folder + '/gt*' + extension))
    mask_files = sorted(glob.glob(data_folder + '/mask*' + extension))
    assert len(gt_files) == len(mask_files), "GT and mask file counts do not match"
    dice_scores = []
    iou_scores = []
    sensitivity_scores = []
    ppv_scores = []
    hd95_distances = []
    for gt_file, mask_file in tqdm(zip(gt_files, mask_files), total=len(gt_files)):
        # Read both images as single-channel grayscale masks.
        gt_image = cv2.imread(gt_file, cv2.IMREAD_GRAYSCALE)
        mask_image = cv2.imread(mask_file, cv2.IMREAD_GRAYSCALE)
        dice_scores.append(dice_coef(mask_image, gt_image))
        iou_scores.append(iou_score(mask_image, gt_image))
        sensitivity_scores.append(sensitivity(mask_image, gt_image))
        ppv_scores.append(ppv(mask_image, gt_image))
        hd95_distances.append(hd95(mask_image, gt_image))
    return {
        'dice_scores': dice_scores,
        'iou_scores': iou_scores,
        'sensitivity_scores': sensitivity_scores,
        'ppv_scores': ppv_scores,
        'hd95_distances': hd95_distances
    }
# Specify the folder that contains the GT and mask image files
data_folder = '/path/to/your/data/folder'
# Run the batch evaluation
evaluation_results = batch_evaluation(data_folder)
# Print the per-image results
print("Dice Scores:", evaluation_results['dice_scores'])
print("IOU Scores:", evaluation_results['iou_scores'])
print("Sensitivity Scores:", evaluation_results['sensitivity_scores'])
print("PPV Scores:", evaluation_results['ppv_scores'])
print("HD95 Distances:", evaluation_results['hd95_distances'])