图像评价指标PSNR、SSIM、LPIPS、FID

发布时间:2024年01月05日

最近在复现图像处理方面的论文的时候,发现几个评价图像质量的指标:
1、FID(Fréchet Inception Distance):FID是用于评估生成模型的质量和多样性的指标。值越小,表示生成图片越多样、质量越好

2、SSIM(Structural Similarity Index):SSIM用于衡量两幅图像的相似性,包括亮度、对比度和结构。是一种测量两个图像之间相似性的方法。值越接近1表示图像的质量更高,相反接近0的值表示的图像质量较差。

3、PSNR(Peak Signal-to-Noise Ratio):PSNR用于衡量图像的质量,通过比较原始图像和压缩图像之间的峰值信噪比来评估压缩算法的效果。PSNR的值越高,表示图像质量越好。

4、LPIPS(Learned Perceptual Image Patch Similarity):LPIPS用于衡量两幅图像的感知相似度,与人类主观感知更加接近。该度量标准学习生成图像到 ground truth 的反向映射,强制生成器学习从假图像中重构真实图像,并优先考虑它们之间的感知相似度。LPIPS的值越低,表示两幅图像的感知差异越小。
因为FID的特殊性,这里先介绍除了FID的其他三个评价指标。
关于FID大家可以看下面这篇博客(都是我写滴😀!!!)

https://blog.csdn.net/qq_43826289/article/details/135396468?spm=1001.2014.3001.5502

from tqdm import tqdm
import torch
import os
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
import numpy as np
import cv2
import lpips
from skimage.metrics import peak_signal_noise_ratio as psnr_loss
from skimage.metrics import structural_similarity as ssim_loss
import argparse

# Command-line configuration for the metric script.
parser = argparse.ArgumentParser(description='PSNR SSIM script', add_help=False)
# Replace this path with your own folder of original (ground-truth) input images.
parser.add_argument('--input_images_path', default='./image/input')
# Replace this path with your own folder of generated/output images.
parser.add_argument('--image2smiles2image_save_path', default='./image/output')
# LPIPS weights version string, forwarded to lpips.LPIPS(version=...).
parser.add_argument('-v', '--version', type=str, default='0.1')
args = parser.parse_args()


def is_png_file(filename):
    """Return True if *filename* has an image extension (.jpg/.jpeg/.png).

    Despite the name, JPEG files are accepted as well.  The check is
    case-insensitive, so ``.PNG`` / ``.Jpg`` also match (a generalization
    over the original, which missed upper-case extensions).
    """
    # str.endswith accepts a tuple of suffixes -- one call instead of any().
    return filename.lower().endswith((".jpg", ".png", ".jpeg"))


def load_img(filepath):
    """Read an image file and return it as an RGB float32 array in [0, 1]."""
    # OpenCV loads BGR; convert to the RGB channel order used everywhere else.
    rgb = cv2.cvtColor(cv2.imread(filepath), cv2.COLOR_BGR2RGB)
    # Normalize 8-bit pixel values to the unit interval.
    return rgb.astype(np.float32) / 255.


class DataLoaderVal(Dataset):
    """Paired dataset of ground-truth ("clean") and restored ("noisy") images.

    Ground-truth images are read from ``args.input_images_path`` and the
    corresponding outputs from ``args.image2smiles2image_save_path``; the
    two folders are matched by sorted filename order.
    """

    def __init__(self, target_transform=None):
        super(DataLoaderVal, self).__init__()

        self.target_transform = target_transform

        gt_dir = args.input_images_path
        input_dir = args.image2smiles2image_save_path

        # Keep only image files, in deterministic (sorted) order.
        self.clean_filenames = [
            os.path.join(gt_dir, name)
            for name in sorted(os.listdir(os.path.join(gt_dir)))
            if is_png_file(name)
        ]
        self.noisy_filenames = [
            os.path.join(input_dir, name)
            for name in sorted(os.listdir(os.path.join(input_dir)))
            if is_png_file(name)
        ]

        self.tar_size = len(self.clean_filenames)

    def __len__(self):
        return self.tar_size

    def __getitem__(self, index):
        # Wrap around so any index maps onto a valid sample.
        tar_index = index % self.tar_size

        clean_path = self.clean_filenames[tar_index]
        noisy_path = self.noisy_filenames[tar_index]

        clean_filename = os.path.split(clean_path)[-1]
        noisy_filename = os.path.split(noisy_path)[-1]

        # Load as float32 HWC, then convert to the CHW layout torch expects.
        clean = torch.from_numpy(np.float32(load_img(clean_path))).permute(2, 0, 1)
        noisy = torch.from_numpy(np.float32(load_img(noisy_path))).permute(2, 0, 1)

        return clean, noisy, clean_filename, noisy_filename


def get_validation_data():
    """Build the paired validation dataset (no target transform)."""
    dataset = DataLoaderVal(None)
    return dataset


# NOTE(review): these statements run at import time, before the __main__
# guard below -- importing this module scans the image folders and loads the
# LPIPS "alex" network as a side effect.
test_dataset = get_validation_data()
test_loader = DataLoader(dataset=test_dataset, batch_size=1, shuffle=False, num_workers=0, drop_last=False)

## Initializing the LPIPS model (AlexNet backbone).
loss_fn = lpips.LPIPS(net='alex', version=args.version)

if __name__ == '__main__':

    # ---------------------- PSNR + SSIM ----------------------
    psnr_val_rgb = []
    ssim_val_rgb = []
    for ii, data_test in enumerate(tqdm(test_loader), 0):
        # Tensors are (1, C, H, W); drop the batch dim and go to HWC numpy.
        rgb_groundtruth = data_test[0].numpy().squeeze().transpose((1, 2, 0))
        # BUG FIX: the original moved the tensor to CUDA only to clamp it and
        # bring it straight back -- crashing on CPU-only machines for no
        # numeric benefit.  Clamp on CPU; the values are identical.
        rgb_restored = torch.clamp(data_test[1], 0, 1).numpy().squeeze().transpose((1, 2, 0))

        # BUG FIX: images are floats in [0, 1]; without data_range=1,
        # scikit-image assumes a range of 2 for float dtypes, skewing both
        # metrics (PSNR by ~6 dB).
        psnr_val_rgb.append(psnr_loss(rgb_restored, rgb_groundtruth, data_range=1))
        # channel_axis=-1 replaces the deprecated multichannel=True
        # (removed in recent scikit-image releases).
        ssim_val_rgb.append(ssim_loss(rgb_restored, rgb_groundtruth, channel_axis=-1, data_range=1))

    psnr_val_rgb = sum(psnr_val_rgb) / len(test_dataset)
    ssim_val_rgb = sum(ssim_val_rgb) / len(test_dataset)

    # ---------------------- LPIPS ----------------------
    files = os.listdir(args.input_images_path)
    i = 0
    total_lpips_distance = 0
    for file in files:
        try:
            gt_path = os.path.join(args.input_images_path, file)
            out_path = os.path.join(args.image2smiles2image_save_path, file)

            # BUG FIX: the original wrote `if (exists(a), exists(b)):`,
            # which builds a 2-tuple and is therefore ALWAYS truthy.
            # Require both files to actually exist before counting the pair.
            if not (os.path.exists(gt_path) and os.path.exists(out_path)):
                continue

            # Load the ground-truth / output pair as LPIPS input tensors.
            img0 = lpips.im2tensor(lpips.load_image(gt_path))
            img1 = lpips.im2tensor(lpips.load_image(out_path))
            i = i + 1

            # Accumulate the perceptual distance for this pair.
            current_lpips_distance = loss_fn.forward(img0, img1)
            total_lpips_distance = total_lpips_distance + current_lpips_distance

        except Exception as e:
            # Best-effort: report the failure for this file and keep going.
            print(e)

    # BUG FIX: guard against i == 0 (empty or fully mismatched folders),
    # which previously raised ZeroDivisionError.
    average_lpips_distance = float(total_lpips_distance) / i if i > 0 else float('nan')

    # (typo "iamges" fixed)
    print("The processed images is ", i)
    print("PSNR: %f, SSIM: %f, LPIPS: %f " % (psnr_val_rgb, ssim_val_rgb, average_lpips_distance))

注意:这里的输入数据集跟输出数据集之间的图像数量要一致,名称也要相同,分辨率也要一致,图像类型也要统一(要么都是jpg,要么都是png,不能一个数据集是jpg、另一个是png),否则会出错。

文章来源:https://blog.csdn.net/qq_43826289/article/details/135396128
本文来自互联网用户投稿,该文观点仅代表作者本人,不代表本站立场。本站仅提供信息存储空间服务,不拥有所有权,不承担相关法律责任。