【opencv450 Image Processing】Histogram Comparison直方图比较

本文介绍如何使用OpenCV库中的compareHist函数比较图像的H-S直方图,并通过不同指标评估图像间的相似度。

OpenCV: Histogram Comparison

Goal

在本教程中,您将学习如何:

使用函数 cv::compareHist 获取一个数值参数,该参数表示两个直方图相互匹配的程度

使用不同的指标来比较直方图

Theory

比较两个直方图(H1 和 H2),首先我们必须选择一个指标(d(H1,H2))来表示两个直方图的匹配程度。

OpenCV 实现函数 cv::compareHist 来执行比较。 它还提供了 4 种不同的指标来计算匹配:

  1. Correlation ( CV_COMP_CORREL ) 相关性

 其中

N 是直方图 bin 的总数。

2. Chi-Square ( CV_COMP_CHISQR ) 卡方

 

3. Intersection ( CV_COMP_INTERSECT ) 相交

 4.Bhattacharyya distance ( CV_COMP_BHATTACHARYYA )

Code

这个程序有什么作用?

加载一个基础图像和 2 个要与之比较的测试图像

生成 1 个图像,它是基础图像的下半部分

将图像转换为 HSV 格式

计算所有图像的 H-S 直方图并将它们归一化以便比较它们。

将基础图像的直方图与 2 个测试直方图、下半部分基础图像的直方图和相同的基础图像直方图进行比较。

显示获得的数值匹配参数。

可下载代码:点击这里opencv/compareHist_Demo.cpp at 4.x · opencv/opencv (github.com)

代码一览:

/**
 * @file compareHist_Demo.cpp
 * @brief 比较直方图Sample code to use the function compareHist
 * @author OpenCV team
 */

#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>

using namespace std;
using namespace cv;

// Command-line specification for cv::CommandLineParser:
// an optional help flag plus three positional image paths.
// Positional keys are declared with a leading '@' and retrieved
// without it, e.g. parser.get<String>("input1").
const char* keys =
    "{ help  h| | Print help message. }"
    "{ @input1 | | Path to input image 1. }"
    "{ @input2 | | Path to input image 2. }"
    "{ @input3 | | Path to input image 3. }";

/**
 * @function main
 * @brief Compares the H-S histogram of a base image against two test images
 *        and against the lower half of the base image itself, using the four
 *        metrics of cv::compareHist (method indices 0..3: HISTCMP_CORREL,
 *        HISTCMP_CHISQR, HISTCMP_INTERSECT, HISTCMP_BHATTACHARYYA).
 * @return 0 on success (or after printing help), -1 if any image fails to load.
 */
int main( int argc, char** argv )
{
    //! [Load three images with different environment settings]
    CommandLineParser parser( argc, argv, keys );
    // The 'help' key is declared in `keys`; honor it instead of attempting
    // to load images from empty paths.
    if( parser.has( "help" ) )
    {
        parser.printMessage();
        return 0;
    }
    Mat src_base = imread( parser.get<String>("input1") );
    Mat src_test1 = imread( parser.get<String>("input2") );
    Mat src_test2 = imread( parser.get<String>("input3") );
    if( src_base.empty() || src_test1.empty() || src_test2.empty() )
    {
        cout << "Could not open or find the images!\n" << endl;
        parser.printMessage();
        return -1;
    }
    //! [Load three images with different environment settings]

    //! [Convert to HSV]
    // Histograms are built on Hue/Saturation, so convert from BGR first.
    Mat hsv_base, hsv_test1, hsv_test2;
    cvtColor( src_base, hsv_base, COLOR_BGR2HSV );
    cvtColor( src_test1, hsv_test1, COLOR_BGR2HSV );
    cvtColor( src_test2, hsv_test2, COLOR_BGR2HSV );
    //! [Convert to HSV]

    //! [Convert to HSV half]
    // Lower half of the base image (same source, so a high match is expected).
    Mat hsv_half_down = hsv_base( Range( hsv_base.rows/2, hsv_base.rows ), Range( 0, hsv_base.cols ) );
    //! [Convert to HSV half]

    //! [Using 50 bins for hue and 60 for saturation]
    int h_bins = 50, s_bins = 60;
    int histSize[] = { h_bins, s_bins };

    // hue varies from 0 to 179, saturation from 0 to 255
    float h_ranges[] = { 0, 180 };
    float s_ranges[] = { 0, 256 };

    const float* ranges[] = { h_ranges, s_ranges };

    // Use the 0-th (H) and 1-st (S) channels
    int channels[] = { 0, 1 };
    //! [Using 50 bins for hue and 60 for saturation]

    //! [Calculate the histograms for the HSV images]
    // Normalize every histogram to [0,1] so images of different sizes
    // can be compared fairly.
    Mat hist_base, hist_half_down, hist_test1, hist_test2;

    calcHist( &hsv_base, 1, channels, Mat(), hist_base, 2, histSize, ranges, true, false );
    normalize( hist_base, hist_base, 0, 1, NORM_MINMAX, -1, Mat() );

    calcHist( &hsv_half_down, 1, channels, Mat(), hist_half_down, 2, histSize, ranges, true, false );
    normalize( hist_half_down, hist_half_down, 0, 1, NORM_MINMAX, -1, Mat() );

    calcHist( &hsv_test1, 1, channels, Mat(), hist_test1, 2, histSize, ranges, true, false );
    normalize( hist_test1, hist_test1, 0, 1, NORM_MINMAX, -1, Mat() );

    calcHist( &hsv_test2, 1, channels, Mat(), hist_test2, 2, histSize, ranges, true, false );
    normalize( hist_test2, hist_test2, 0, 1, NORM_MINMAX, -1, Mat() );
    //! [Calculate the histograms for the HSV images]

    //! [Apply the histogram comparison methods]
    // 0: Correlation, 1: Chi-Square, 2: Intersection, 3: Bhattacharyya
    for( int compare_method = 0; compare_method < 4; compare_method++ )
    {
        double base_base = compareHist( hist_base, hist_base, compare_method );
        double base_half = compareHist( hist_base, hist_half_down, compare_method );
        double base_test1 = compareHist( hist_base, hist_test1, compare_method );
        double base_test2 = compareHist( hist_base, hist_test2, compare_method );

        cout << "Method " << compare_method << " Perfect, Base-Half, Base-Test(1), Base-Test(2) : "
             <<  base_base << " / " << base_half << " / " << base_test1 << " / " << base_test2 << endl;
    }
    //! [Apply the histogram comparison methods]

    cout << "Done \n";
    return 0;
}

Explanation

加载基础图像 (src_base) 和其他两个测试图像:

    CommandLineParser parser( argc, argv, keys );
    Mat src_base = imread( parser.get<String>("input1") );//基础图像
    Mat src_test1 = imread( parser.get<String>("input2") );//测试图像1
    Mat src_test2 = imread( parser.get<String>("input3") );//测试图像2
    if( src_base.empty() || src_test1.empty() || src_test2.empty() )
    {
        cout << "Could not open or find the images!\n" << endl;
        parser.printMessage();
        return -1;
    }

将它们转换为 HSV 格式

    Mat hsv_base, hsv_test1, hsv_test2;
    cvtColor( src_base, hsv_base, COLOR_BGR2HSV );
    cvtColor( src_test1, hsv_test1, COLOR_BGR2HSV );
    cvtColor( src_test2, hsv_test2, COLOR_BGR2HSV );

此外,创建一半基本图像的图像(HSV 格式):

    Mat hsv_half_down = hsv_base( Range( hsv_base.rows/2, hsv_base.rows ), Range( 0, hsv_base.cols ) ); //下半截图像

初始化参数以计算直方图(箱、范围和通道 H 和 S )。

    int h_bins = 50, s_bins = 60;
    int histSize[] = { h_bins, s_bins };
// 色调从 0 到 179 变化,饱和度从 0 到 255 
// hue varies from 0 to 179, saturation from 0 to 255
    float h_ranges[] = { 0, 180 };
    float s_ranges[] = { 0, 256 };
    const float* ranges[] = { h_ranges, s_ranges };
    // Use the 0-th and 1-st channels
    int channels[] = { 0, 1 };

计算基础图像、2 个测试图像和 基础图像下半部分的直方图

    Mat hist_base, hist_half_down, hist_test1, hist_test2;
    calcHist( &hsv_base, 1, channels, Mat(), hist_base, 2, histSize, ranges, true, false );
    normalize( hist_base, hist_base, 0, 1, NORM_MINMAX, -1, Mat() );
    calcHist( &hsv_half_down, 1, channels, Mat(), hist_half_down, 2, histSize, ranges, true, false );
    normalize( hist_half_down, hist_half_down, 0, 1, NORM_MINMAX, -1, Mat() );
    calcHist( &hsv_test1, 1, channels, Mat(), hist_test1, 2, histSize, ranges, true, false );
    normalize( hist_test1, hist_test1, 0, 1, NORM_MINMAX, -1, Mat() );
    calcHist( &hsv_test2, 1, channels, Mat(), hist_test2, 2, histSize, ranges, true, false );
    normalize( hist_test2, hist_test2, 0, 1, NORM_MINMAX, -1, Mat() ); 

在基础图像 (hist_base) 的直方图和其他直方图之间依次应用 4 种比较方法

    for( int compare_method = 0; compare_method < 4; compare_method++ )
    {
        double base_base = compareHist( hist_base, hist_base, compare_method );
        double base_half = compareHist( hist_base, hist_half_down, compare_method );
        double base_test1 = compareHist( hist_base, hist_test1, compare_method );
        double base_test2 = compareHist( hist_base, hist_test2, compare_method );
        cout << "Method " << compare_method << " Perfect, Base-Half, Base-Test(1), Base-Test(2) : "
             <<  base_base << " / " << base_half << " / " << base_test1 << " / " << base_test2 << endl;
    }

Results

  1. 我们使用以下图像作为输入:

 

Base_0

 

Test_1

 

Test_2

其中第一个是基础(与其他图像进行比较),另外两个是测试图像。 我们还将比较第一张图像与其自身和一半的基本图像。

2.   当我们将基本图像直方图与其自身进行比较时,我们应该期待完美匹配。 此外,与一半基本图像的直方图相比,它应该呈现出高度匹配,因为两者都来自同一来源。 对于另外两张测试图像,我们可以观察到它们的光照条件非常不同,所以匹配应该不是很好

3. 这是我们使用 OpenCV 3.4.1 得到的数值结果:

*Method*

Base - Base

Base - Half

Base - Test 1

Base - Test 2

*Correlation*

1.000000

0.880438

0.20457

0.0664547

*Chi-square*

0.000000

4.6834

2697.98

4763.8

*Intersection*

18.8947

13.022

5.44085

2.58173

*Bhattacharyya*

0.000000

0.237887

0.679826

0.874173

对于 Correlation 和 Intersection 方法,度量越高,匹配越准确。正如我们所见,base-base 的匹配是所有结果中最高的,符合预期。我们还可以观察到 base-half 是第二好的匹配(正如我们所预测的那样)。对于其他两个指标(Chi-square 和 Bhattacharyya),结果越小,匹配越好。我们可以观察到,测试 1 和测试 2 相对于基础图像的匹配更差,这也是意料之中的。

import cv2 import numpy as np import matplotlib.pyplot as plt from skimage import exposure, filters, io import os import tifffile import glob from datetime import datetime import pandas as pd import re # 解决汉语不能正常显示问题 plt.rcParams['font.sans-serif'] = ['SimHei', 'DejaVu Sans'] plt.rcParams['axes.unicode_minus'] = False class BatchEmbryoPreprocessor: def __init__(self, target_size=(640, 640)): self.target_size = target_size self.clahe = cv2.createCLAHE(clipLimit=1.5, tileGridSize=(8, 8)) self.quantitative_data = [] def sanitize_filename(self, filename): """清理文件名,移除非法字符但保持原始命名风格""" # 只移除真正有问题的字符,保持数字和简单名称 filename = re.sub(r'[<>:"/\\|?*]', '_', filename) filename = filename.strip() return filename def load_tif_image(self, image_path): """专门加载TIF格式图像""" if not os.path.exists(image_path): raise FileNotFoundError(f"图像文件不存在: {image_path}") try: tif_image = tifffile.imread(image_path) print( f"原始TIF图像信息 - 形状: {tif_image.shape}, 数据类型: {tif_image.dtype}, 范围: [{tif_image.min()}, {tif_image.max()}]") if tif_image.dtype == np.uint16: print("检测到16位TIF图像,进行8位转换...") p2, p98 = np.percentile(tif_image, (2, 98)) image_8bit = exposure.rescale_intensity(tif_image, in_range=(p2, p98), out_range=(0, 255)).astype( np.uint8) elif tif_image.dtype == np.float32 or tif_image.dtype == np.float64: print("检测到浮点型TIF图像,进行8位转换...") image_8bit = exposure.rescale_intensity(tif_image, out_range=(0, 255)).astype(np.uint8) else: image_8bit = tif_image.astype(np.uint8) if len(image_8bit.shape) == 3: if image_8bit.shape[2] == 3: image_cv = cv2.cvtColor(image_8bit, cv2.COLOR_RGB2BGR) elif image_8bit.shape[2] == 4: image_cv = cv2.cvtColor(image_8bit[:, :, :3], cv2.COLOR_RGB2BGR) else: image_cv = image_8bit[:, :, 0] else: image_cv = image_8bit return image_cv, tif_image except Exception as e: print(f"tifffile读取失败: {e},尝试OpenCV...") image = cv2.imread(image_path, cv2.IMREAD_ANYDEPTH | cv2.IMREAD_ANYCOLOR) if image is None: raise ValueError(f"无法读取TIF图像: {image_path}") return image, image def 
basic_enhancement(self, image): """基础对比度增强""" if image.dtype != np.uint8: image = exposure.rescale_intensity(image, out_range=(0, 255)).astype(np.uint8) if len(image.shape) == 3: gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) else: gray = image.copy() clahe_img = self.clahe.apply(gray) return clahe_img, gray def advanced_enhancement(self, image): """高级图像增强""" if len(image.shape) == 3: gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) else: gray = image.copy() if gray.dtype != np.uint8: gray = exposure.rescale_intensity(gray, out_range=(0, 255)).astype(np.uint8) gamma_corrected = exposure.adjust_gamma(gray, gamma=0.7) adaptive_eq = exposure.equalize_adapthist(gray, clip_limit=0.03) adaptive_eq = (adaptive_eq * 255).astype(np.uint8) try: guided = cv2.ximgproc.guidedFilter(gray, gray, radius=5, eps=50) guided = (guided * 255).astype(np.uint8) except: guided = gray.copy() return gamma_corrected, adaptive_eq, guided def noise_reduction(self, image): """噪声去除""" gaussian = cv2.GaussianBlur(image, (3, 3), 0) median = cv2.medianBlur(image, 3) try: nlm = cv2.fastNlMeansDenoising(image, None, h=10, templateWindowSize=7, searchWindowSize=21) except: nlm = image.copy() return gaussian, median, nlm def edge_enhancement(self, image): """边缘增强""" kernel = np.array([[-1, -1, -1], [-1, 9, -1], [-1, -1, -1]]) sharpened = cv2.filter2D(image, -1, kernel) return sharpened def resize_image(self, image): """调整图像尺寸""" resized = cv2.resize(image, self.target_size) return resized def calculate_image_metrics(self, image, stage_name): """计算图像质量指标""" if len(image.shape) == 3: gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) else: gray = image metrics = { 'stage': stage_name, 'mean_intensity': np.mean(gray), 'std_intensity': np.std(gray), 'contrast': gray.std() / gray.mean() if gray.mean() > 0 else 0, 'entropy': self.calculate_entropy(gray), 'snr_estimate': np.mean(gray) / np.std(gray) if np.std(gray) > 0 else 0 } return metrics def calculate_entropy(self, image): """计算图像熵""" histogram = 
cv2.calcHist([image], [0], None, [256], [0, 256]) histogram = histogram / histogram.sum() entropy = -np.sum(histogram * np.log2(histogram + 1e-7)) return entropy def full_preprocessing_pipeline(self, image_path): """完整的8步预处理流水线""" original_filename = os.path.splitext(os.path.basename(image_path))[0] clean_filename = self.sanitize_filename(original_filename) # 1. 加载原始图像 original_8bit, original_raw = self.load_tif_image(image_path) # 2. 灰度转换 clahe_img, gray_original = self.basic_enhancement(original_8bit) # 3. CLAHE对比度增强 # (已在basic_enhancement中完成) # 4. 伽马校正 gamma_img, adaptive_eq, guided_img = self.advanced_enhancement(original_8bit) # 5. 自适应直方图均衡 # (已在advanced_enhancement中完成) # 6. 非局部均值去噪 gaussian, median, nlm = self.noise_reduction(clahe_img) # 7. 边缘锐化增强 sharpened = self.edge_enhancement(nlm) # 8. 尺寸标准化 final_resized = self.resize_image(sharpened) # 收集所有处理结果 processing_stages = { '1_original': gray_original, '2_clahe': clahe_img, '3_gamma': gamma_img, '4_adaptive_eq': adaptive_eq, '5_nlm_denoised': nlm, '6_sharpened': sharpened, '7_final': final_resized } # 计算各阶段质量指标 metrics_data = [] for stage_name, stage_image in processing_stages.items(): metrics = self.calculate_image_metrics(stage_image, stage_name) metrics['filename'] = clean_filename metrics_data.append(metrics) self.quantitative_data.extend(metrics_data) return processing_stages, clean_filename, original_filename def save_all_processing_steps(self, processing_stages, clean_filename, original_filename, output_dir): """保存所有处理步骤的图像""" os.makedirs(output_dir, exist_ok=True) print(f"正在保存所有处理步骤图像到: {output_dir}") # 使用原始文件名保存,保持简单命名 images_to_save = { f'{original_filename}_original.jpg': processing_stages['1_original'], f'{original_filename}_clahe.jpg': processing_stages['2_clahe'], f'{original_filename}_gamma.jpg': processing_stages['3_gamma'], f'{original_filename}_adaptive.jpg': processing_stages['4_adaptive_eq'], f'{original_filename}_nlm.jpg': processing_stages['5_nlm_denoised'], 
f'{original_filename}_sharpened.jpg': processing_stages['6_sharpened'], f'{original_filename}_final.jpg': processing_stages['7_final'] } success_count = 0 for img_name, img_data in images_to_save.items(): output_path = os.path.join(output_dir, img_name) try: # 确保图像是uint8类型 if img_data.dtype != np.uint8: img_data = exposure.rescale_intensity(img_data, out_range=(0, 255)).astype(np.uint8) success = cv2.imwrite(output_path, img_data) if success and os.path.exists(output_path): file_size = os.path.getsize(output_path) if file_size > 0: print(f"✓ {img_name} 保存成功 ({file_size} 字节)") success_count += 1 else: print(f"❌ {img_name} 文件大小为0") else: print(f"❌ {img_name} 保存失败") except Exception as e: print(f"❌ {img_name} 保存异常: {e}") print(f"图像保存结果: {success_count}/{len(images_to_save)} 成功") return success_count == len(images_to_save) def save_final_images(self, processing_stages, clean_filename, original_filename, output_dir_A): """保存最终图像到A文件夹 - 使用原始简单命名""" os.makedirs(output_dir_A, exist_ok=True) final_image = processing_stages['7_final'] # 使用原始文件名,保持简单命名 (1.jpg, 2.jpg, 3.jpg等) output_path = os.path.join(output_dir_A, f"{original_filename}.jpg") # 确保图像是uint8类型 if final_image.dtype != np.uint8: final_image = exposure.rescale_intensity(final_image, out_range=(0, 255)).astype(np.uint8) success = cv2.imwrite(output_path, final_image) if success and os.path.exists(output_path) and os.path.getsize(output_path) > 0: print(f"✓ 最终图像保存成功: {output_path}") return True else: print(f"❌ 最终图像保存失败: {output_path}") return False def create_comparison_figure(self, processing_stages, clean_filename, original_filename, output_dir_B): """创建原图与最终图像对比图到B文件夹""" os.makedirs(output_dir_B, exist_ok=True) fig, axes = plt.subplots(2, 4, figsize=(20, 10)) axes = axes.ravel() stages_to_show = list(processing_stages.keys()) titles = ['原始图像', 'CLAHE增强', '伽马校正', '自适应均衡', '非局部去噪', '边缘锐化', '最终结果', '处理流程'] for i, (stage, title) in enumerate(zip(stages_to_show[:7], titles[:7])): img = processing_stages[stage] 
axes[i].imshow(img, cmap='gray') axes[i].set_title(title) axes[i].axis('off') # 第八个图显示处理流程示意图 axes[7].text(0.5, 0.5, '1.原始→2.灰度→3.CLAHE→4.伽马\n5.自适应→6.去噪→7.锐化→8.尺寸', ha='center', va='center', fontsize=12, transform=axes[7].transAxes) axes[7].set_title('处理流程') axes[7].axis('off') plt.suptitle(f'{original_filename} - 预处理流程对比', fontsize=16) plt.tight_layout() # 使用原始文件名 output_path = os.path.join(output_dir_B, f"{original_filename}_comparison.jpg") plt.savefig(output_path, dpi=150, bbox_inches='tight') plt.close() print(f"✓ 对比图保存成功: {output_path}") return output_path def create_intensity_comparison(self, processing_stages, clean_filename, original_filename, output_dir_C): """创建像素强度对比图到C文件夹""" os.makedirs(output_dir_C, exist_ok=True) original = processing_stages['1_original'] final = processing_stages['7_final'] fig, axes = plt.subplots(2, 2, figsize=(12, 10)) # 原始图像直方图 axes[0, 0].hist(original.ravel(), bins=256, range=[0, 256], alpha=0.7, color='blue') axes[0, 0].set_title('原始图像直方图') axes[0, 0].set_xlabel('像素强度') axes[0, 0].set_ylabel('频次') # 最终图像直方图 axes[0, 1].hist(final.ravel(), bins=256, range=[0, 256], alpha=0.7, color='red') axes[0, 1].set_title('最终图像直方图') axes[0, 1].set_xlabel('像素强度') axes[0, 1].set_ylabel('频次') # 对比直方图 axes[1, 0].hist(original.ravel(), bins=256, range=[0, 256], alpha=0.5, color='blue', label='原始') axes[1, 0].hist(final.ravel(), bins=256, range=[0, 256], alpha=0.5, color='red', label='最终') axes[1, 0].set_title('强度分布对比') axes[1, 0].set_xlabel('像素强度') axes[1, 0].set_ylabel('频次') axes[1, 0].legend() # 统计信息 stats_text = f""" 原始图像: 均值: {np.mean(original):.1f} 标准差: {np.std(original):.1f} 对比度: {np.std(original) / np.mean(original) if np.mean(original) > 0 else 0:.3f} 最终图像: 均值: {np.mean(final):.1f} 标准差: {np.std(final):.1f} 对比度: {np.std(final) / np.mean(final) if np.mean(final) > 0 else 0:.3f} 噪声改善: {((np.std(original) - np.std(final)) / np.std(original) * 100 if np.std(original) > 0 else 0):.1f}% """ axes[1, 1].text(0.1, 0.9, stats_text, transform=axes[1, 
1].transAxes, fontsize=10, verticalalignment='top', bbox=dict(boxstyle='round', facecolor='wheat', alpha=0.8)) axes[1, 1].set_title('统计信息') axes[1, 1].axis('off') plt.suptitle(f'{original_filename} - 像素强度分析', fontsize=16) plt.tight_layout() # 使用原始文件名 output_path = os.path.join(output_dir_C, f"{original_filename}_intensity_analysis.jpg") plt.savefig(output_path, dpi=150, bbox_inches='tight') plt.close() print(f"✓ 强度分析图保存成功: {output_path}") return output_path def generate_quantitative_report(self, output_dir): """生成量化处理效果报告""" os.makedirs(output_dir, exist_ok=True) if not self.quantitative_data: print("没有量化数据可生成报告") return None, None df = pd.DataFrame(self.quantitative_data) # 确保数值列是数值类型 numeric_columns = ['mean_intensity', 'std_intensity', 'contrast', 'entropy', 'snr_estimate'] for col in numeric_columns: df[col] = pd.to_numeric(df[col], errors='coerce') # 生成详细报告 report_path = os.path.join(output_dir, "quantitative_analysis_report.txt") with open(report_path, 'w', encoding='utf-8') as f: f.write("胚胎图像预处理量化分析报告\n") f.write("=" * 60 + "\n") f.write(f"生成时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n") f.write(f"处理图像数量: {len(df['filename'].unique())}\n") f.write(f"总数据点数: {len(df)}\n\n") # 各阶段统计 - 只计算数值列 f.write("各处理阶段质量指标汇总:\n") f.write("-" * 50 + "\n") # 按阶段分组并计算数值列的统计量 stage_stats = df.groupby('stage')[numeric_columns].agg(['mean', 'std']).round(3) for stage in stage_stats.index: f.write(f"\n{stage}阶段:\n") stats = stage_stats.loc[stage] f.write(f" 平均强度: {stats[('mean_intensity', 'mean')]} ± {stats[('mean_intensity', 'std')]}\n") f.write(f" 噪声水平: {stats[('std_intensity', 'mean')]} ± {stats[('std_intensity', 'std')]}\n") f.write(f" 对比度: {stats[('contrast', 'mean')]} ± {stats[('contrast', 'std')]}\n") f.write(f" 信息熵: {stats[('entropy', 'mean')]} ± {stats[('entropy', 'std')]}\n") f.write(f" 信噪比: {stats[('snr_estimate', 'mean')]} ± {stats[('snr_estimate', 'std')]}\n") # 处理效果分析 f.write("\n处理效果分析:\n") f.write("-" * 50 + "\n") # 只选择数值列进行计算 original_numeric = 
df[df['stage'] == '1_original'][numeric_columns].mean() final_numeric = df[df['stage'] == '7_final'][numeric_columns].mean() if original_numeric['std_intensity'] > 0: noise_reduction = ((original_numeric['std_intensity'] - final_numeric['std_intensity']) / original_numeric['std_intensity'] * 100) else: noise_reduction = 0 if original_numeric['contrast'] > 0: contrast_improvement = ((final_numeric['contrast'] - original_numeric['contrast']) / original_numeric['contrast'] * 100) else: contrast_improvement = 0 if original_numeric['entropy'] > 0: entropy_change = ((final_numeric['entropy'] - original_numeric['entropy']) / original_numeric['entropy'] * 100) else: entropy_change = 0 f.write(f"噪声标准差减少: {noise_reduction:.1f}%\n") f.write(f"对比度提升: {contrast_improvement:.1f}%\n") f.write(f"信息熵变化: {entropy_change:+.1f}%\n") if original_numeric['snr_estimate'] > 0: snr_improvement = ((final_numeric['snr_estimate'] - original_numeric['snr_estimate']) / original_numeric['snr_estimate'] * 100) f.write(f"信噪比改善: {snr_improvement:.1f}%\n") else: f.write("信噪比改善: 无法计算\n") # 保存CSV格式数据 csv_path = os.path.join(output_dir, "detailed_metrics.csv") df.to_csv(csv_path, index=False, encoding='utf-8-sig') print(f"✓ 量化报告已保存: {report_path}") print(f"✓ 详细数据已保存: {csv_path}") return report_path, csv_path def batch_process_folder(self, input_folder, output_base_dir): """批量处理文件夹中的所有TIF图像""" # 创建输出目录结构 dir_A = os.path.join(output_base_dir, "A_最终图像") dir_B = os.path.join(output_base_dir, "B_对比图像") dir_C = os.path.join(output_base_dir, "C_强度分析") dir_all_steps = os.path.join(output_base_dir, "D_所有处理步骤") dir_reports = os.path.join(output_base_dir, "量化报告") # 查找所有TIF文件 tif_patterns = ["*.tif", "*.tiff", "*.TIF", "*.TIFF"] tif_files = [] for pattern in tif_patterns: tif_files.extend(glob.glob(os.path.join(input_folder, pattern))) tif_files.extend(glob.glob(os.path.join(input_folder, "**", pattern), recursive=True)) tif_files = sorted(list(set(tif_files))) if not tif_files: print("未找到任何TIF文件") return 
print(f"找到 {len(tif_files)} 个TIF文件,开始批量处理...") processed_count = 0 for tif_file in tif_files: try: original_filename = os.path.splitext(os.path.basename(tif_file))[0] print(f"\n处理中: {original_filename}") # 执行预处理流水线 processing_stages, clean_filename, original_filename = self.full_preprocessing_pipeline(tif_file) # 保存所有处理步骤的图像 self.save_all_processing_steps(processing_stages, clean_filename, original_filename, dir_all_steps) # 保存各类输出 self.save_final_images(processing_stages, clean_filename, original_filename, dir_A) self.create_comparison_figure(processing_stages, clean_filename, original_filename, dir_B) self.create_intensity_comparison(processing_stages, clean_filename, original_filename, dir_C) processed_count += 1 print(f"✓ {original_filename} 处理完成") except Exception as e: print(f"❌ 处理失败 {os.path.basename(tif_file)}: {e}") # 生成量化报告 if self.quantitative_data: self.generate_quantitative_report(dir_reports) print(f"\n批量处理完成! 成功处理 {processed_count}/{len(tif_files)} 个文件") print(f"输出目录结构:") print(f" A_最终图像: {dir_A} (包含: 1.jpg, 2.jpg, 3.jpg, 4.jpg等)") print(f" B_对比图像: {dir_B}") print(f" C_强度分析: {dir_C}") print(f" D_所有处理步骤: {dir_all_steps}") print(f" 量化报告: {dir_reports}") def main(): """主函数""" preprocessor = BatchEmbryoPreprocessor(target_size=(640, 640)) # 配置路径 input_folder = r"E:\ZZZ\origin" # 输入文件夹 output_base_dir = r"E:\ZZZ\OUTPUT" # 输出根目录 # 开始批量处理 preprocessor.batch_process_folder(input_folder, output_base_dir) if __name__ == "__main__": main()噪点增加50 对比度增加 暗部像素峰值减少 怎么改进?
10-10
评论
成就一亿技术人!
拼手气红包6.0元
还能输入1000个字符
 
红包 添加红包
表情包 插入表情
 条评论被折叠 查看
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值