Modify the code below so the model computes faster; feel free to swap in more advanced, faster function modules (e.g. for image segmentation, or for computing and sorting gravel volumes) so that the CT images are processed more cleanly at higher quality and the gravel grains are extracted more quickly. The code follows:
import os
import re
import time
import warnings
import numpy as np
import cupy as cp
import cupyx.scipy.ndimage as cpx_ndi
import tifffile as tiff
from skimage import filters, morphology, measure, segmentation, feature, exposure
from scipy import ndimage as ndi
from sklearn.decomposition import PCA
from concurrent.futures import ProcessPoolExecutor
import matplotlib.pyplot as plt
import logging
import trimesh
from tqdm import tqdm, trange
# Paths and parameters
input_path = r"E:\GHJ\shaliyan\XY"
output_dir = r"E:\GHJ\shaliyan\keli"
voxel_size = 38.6598  # micrometres
target_count = 1500  # maximum number of grains to extract
def natural_sort_key(s):
    """Natural-sort key so file names with numeric parts order correctly."""
    return [int(text) if text.isdigit() else text.lower() for text in re.split(r'(\d+)', s)]
def read_tiff_sequence(folder_path):
    """Read a TIFF sequence from a folder into a GPU volume."""
    print("Reading TIFF sequence...")
    tiff_files = [f for f in os.listdir(folder_path) if f.endswith(('.tif', '.tiff'))]
    tiff_files.sort(key=natural_sort_key)
    first_image = tiff.imread(os.path.join(folder_path, tiff_files[0]))
    height, width = first_image.shape
    volume_gpu = cp.zeros((len(tiff_files), height, width), dtype=cp.float32)
    # Read slice by slice with a progress bar; each slice is uploaded to the GPU as it arrives
    with tqdm(total=len(tiff_files), desc="Reading images") as pbar:
        for i, filename in enumerate(tiff_files):
            img_path = os.path.join(folder_path, filename)
            volume_gpu[i] = cp.asarray(tiff.imread(img_path).astype(np.float32))
            pbar.update(1)
    return volume_gpu
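# --- Optional faster variant (sketch): parallel TIFF reading ---
# Reading the stack is I/O-bound, so a thread pool usually helps more than any
# GPU-side trick. A minimal sketch under the same folder layout;
# `read_tiff_sequence_fast` is a hypothetical name, not part of the original code.
def read_tiff_sequence_fast(folder_path):
    from concurrent.futures import ThreadPoolExecutor
    tiff_files = sorted(
        (f for f in os.listdir(folder_path) if f.endswith(('.tif', '.tiff'))),
        key=natural_sort_key)
    def _load(name):
        return tiff.imread(os.path.join(folder_path, name)).astype(np.float32)
    with ThreadPoolExecutor() as pool:
        slices = list(pool.map(_load, tiff_files))
    # Stack once on the CPU, then copy to the GPU in a single transfer
    return cp.asarray(np.stack(slices))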
def preprocess_ct_gpu(volume_gpu):
    """Preprocess the CT volume on the GPU."""
    print("Preprocessing CT data (GPU-accelerated)...")
    # Median filter for denoising
    print("Applying median filter...")
    filtered_volume = cpx_ndi.median_filter(volume_gpu, size=3)
    # Contrast enhancement (per-slice histogram equalization)
    print("Applying contrast enhancement...")
    enhanced_volume = cp.zeros_like(filtered_volume)
    for i in tqdm(range(filtered_volume.shape[0]), desc="Enhancing contrast"):
        slice_data = filtered_volume[i]
        slice_min = cp.min(slice_data)
        slice_max = cp.max(slice_data)
        # Guard against division by zero on constant slices
        scale = cp.maximum(slice_max - slice_min, 1e-8)
        normalized = (slice_data - slice_min) / scale * 255
        hist = cp.histogram(normalized.flatten(), bins=256, range=(0, 256))[0]
        cdf = cp.cumsum(hist)
        cdf_normalized = 255 * cdf / cdf[-1]
        enhanced_volume[i] = cp.interp(normalized.flatten(),
                                       cp.arange(256),
                                       cdf_normalized).reshape(normalized.shape)
    # Otsu thresholding (computed on the middle slice, applied to the whole volume)
    print("Applying threshold segmentation...")
    middle_slice = cp.asnumpy(enhanced_volume[enhanced_volume.shape[0]//2])
    thresh = filters.threshold_otsu(middle_slice)
    binary_volume = enhanced_volume > thresh
    return binary_volume
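# --- Optional faster variant (sketch): Otsu threshold computed on the GPU ---
# Instead of copying one slice back to the CPU for skimage's Otsu, the threshold
# can be derived from the histogram of the whole enhanced volume without leaving
# the GPU. A minimal sketch; `otsu_threshold_gpu` is a hypothetical helper, and
# the between-class-variance formulation below is the standard Otsu criterion.
def otsu_threshold_gpu(volume_gpu, nbins=256):
    hist, edges = cp.histogram(volume_gpu.ravel(), bins=nbins)
    hist = hist.astype(cp.float64)
    centers = (edges[:-1] + edges[1:]) / 2
    w0 = cp.cumsum(hist)                # cumulative class-0 weight
    w1 = w0[-1] - w0                    # complementary class-1 weight
    m0 = cp.cumsum(hist * centers)      # cumulative first moment
    mu0 = m0 / cp.maximum(w0, 1)        # class means (guarded against empty bins)
    mu1 = (m0[-1] - m0) / cp.maximum(w1, 1)
    var_between = w0 * w1 * (mu0 - mu1) ** 2
    return centers[int(cp.argmax(var_between))]
# Usage would be: thresh = otsu_threshold_gpu(enhanced_volume)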
def segment_grains_gpu(binary_volume_gpu, min_distance=3, footprint_size=3):
    """Segment gravel grains with a GPU-assisted watershed."""
    print("Segmenting grains (GPU-accelerated)...")
    # Distance transform on the CPU (the GPU version can be unstable; newer CuPy
    # releases also ship cupyx.scipy.ndimage.distance_transform_edt, which may be
    # worth trying if your installed version has it)
    binary_cpu = cp.asnumpy(binary_volume_gpu)
    print("Computing distance transform...")
    distance = ndi.distance_transform_edt(binary_cpu)
    distance_gpu = cp.asarray(distance)
    # Find local maxima with a maximum filter
    print("Finding local maxima...")
    size = footprint_size if footprint_size % 2 == 1 else footprint_size + 1
    max_filtered = cpx_ndi.maximum_filter(distance_gpu, size=size)
    local_max = (distance_gpu == max_filtered) & (distance_gpu > min_distance)
    local_max = cp.asnumpy(local_max & binary_volume_gpu)
    # Remove tiny seed clusters
    local_max = morphology.remove_small_objects(local_max, min_size=2)
    # Label the seed points
    markers = measure.label(local_max)
    # Watershed segmentation
    print("Applying watershed...")
    labels = segmentation.watershed(-distance, markers, mask=binary_cpu)
    return labels
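# --- Optional variant (sketch): seed detection with peak_local_max ---
# The equality test against a maximum filter drops maxima that form plateaus of
# several voxels; skimage.feature.peak_local_max handles plateaus and enforces a
# true minimum peak distance. A minimal sketch on the CPU distance map;
# `find_markers_cpu` is a hypothetical helper, not part of the original code.
def find_markers_cpu(distance, binary_cpu, min_distance=3):
    peak_idx = feature.peak_local_max(
        distance, min_distance=min_distance, labels=binary_cpu.astype(int))
    seeds = np.zeros(distance.shape, dtype=bool)
    seeds[tuple(peak_idx.T)] = True
    return measure.label(seeds)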
def calculate_volumes(labels):
    """Compute the volume (voxel count) of each label."""
    print("Computing grain volumes...")
    unique_labels = np.unique(labels)
    unique_labels = unique_labels[unique_labels > 0]  # exclude background label 0
    volumes = {}
    for label in tqdm(unique_labels, desc="Computing volumes"):
        volumes[label] = np.sum(labels == label)
    return volumes
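# --- Optional faster variant (sketch): volumes in a single bincount pass ---
# The loop above rescans the whole volume once per label, which is
# O(n_labels × n_voxels); np.bincount counts every label in one pass. A minimal
# sketch with a hypothetical name; it returns the same {label: voxel_count} mapping.
def calculate_volumes_fast(labels):
    counts = np.bincount(labels.ravel())
    return {label: int(counts[label])
            for label in range(1, len(counts)) if counts[label] > 0}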
def extract_and_save_grains_high_quality(labels, output_dir, voxel_size, target_count):
    """Extract each grain and save it as a high-quality STL file."""
    print("Extracting and saving high-quality STL files...")
    os.makedirs(output_dir, exist_ok=True)
    # Compute volumes and sort, largest first
    volumes = calculate_volumes(labels)
    sorted_labels = sorted(volumes.keys(), key=lambda x: volumes[x], reverse=True)
    if len(sorted_labels) > target_count:
        sorted_labels = sorted_labels[:target_count]
    print(f"Extracting the {len(sorted_labels)} largest grains")
    # Process grains in parallel, with a progress bar over the futures
    with ProcessPoolExecutor(max_workers=os.cpu_count()) as executor:
        futures = []
        for i, label in enumerate(sorted_labels):
            futures.append(executor.submit(
                process_single_grain,
                labels, label, i, output_dir, voxel_size, volumes[label]
            ))
        for future in tqdm(futures, desc="Processing grains", total=len(futures)):
            future.result()
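# --- Optional faster variant (sketch): crop each grain before dispatching ---
# Submitting the full `labels` volume pickles it once per task, which dominates
# the runtime for large volumes. ndi.find_objects gives one bounding-box slice
# per label in a single pass, so each worker can receive only the small crop it
# needs. A minimal sketch; `extract_grain_crops` is a hypothetical helper.
def extract_grain_crops(labels, sorted_labels):
    slices = ndi.find_objects(labels)  # entry i is the bounding box of label i+1
    for label in sorted_labels:
        sl = slices[label - 1]
        if sl is not None:
            yield label, (labels[sl] == label)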
def process_single_grain(labels, label, index, output_dir, voxel_size, volume):
    """Process a single grain and generate a high-quality STL."""
    try:
        print(f"Processing grain {index+1} (label {label}, volume {volume} voxels)")
        # Binary mask of the current grain
        grain_mask = labels == label
        # Generate a high-quality mesh: small step size for precision
        vertices, faces, normals, values = measure.marching_cubes(
            grain_mask.astype(np.float32),
            level=0.5,
            step_size=1,  # smaller step size for higher precision
            allow_degenerate=False,
            gradient_direction='descent'
        )
        # Build the mesh object
        mesh = trimesh.Trimesh(vertices=vertices, faces=faces)
        # Apply the physical voxel size
        mesh.vertices *= voxel_size
        # Smooth the mesh in place (Trimesh has no .smooth() method; Taubin
        # smoothing preserves grain volume better than a plain Laplacian filter)
        trimesh.smoothing.filter_taubin(mesh)
        # PCA alignment
        mesh = align_mesh(mesh)
        # Save as an STL file
        output_path = os.path.join(output_dir, f"KL-{index+1:03d}.stl")
        mesh.export(output_path, file_type='stl')
        print(f"Saved: {output_path}")
        return True
    except Exception as e:
        print(f"Error while processing grain {label}: {str(e)}")
        return False
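# --- Optional quality tweak (sketch): pad masks that touch the volume border ---
# marching_cubes leaves an open surface where a grain touches the edge of the
# array; padding the mask with one background layer keeps every mesh watertight.
# The one-voxel coordinate offset is harmless here because align_mesh re-centres
# the vertices. A minimal sketch with a hypothetical name.
def padded_mask(grain_mask):
    return np.pad(grain_mask, pad_width=1, mode='constant', constant_values=False)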
def align_mesh(mesh):
    """Align the mesh orientation using PCA."""
    # Principal components of the vertices
    pca = PCA(n_components=3)
    pca.fit(mesh.vertices)
    # Rotate (and centre) the mesh so its principal axes match the coordinate axes;
    # note that a PCA rotation with negative determinant flips the face winding
    transformed_vertices = pca.transform(mesh.vertices)
    # Build the aligned mesh
    aligned_mesh = trimesh.Trimesh(vertices=transformed_vertices, faces=mesh.faces)
    return aligned_mesh
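# --- Optional variant (sketch): alignment via trimesh's inertia transform ---
# trimesh can align a mesh to its principal inertia axes directly, which avoids
# the sklearn dependency for this step. A minimal sketch, assuming a reasonably
# watertight mesh; `align_mesh_inertia` is a hypothetical name.
def align_mesh_inertia(mesh):
    aligned = mesh.copy()
    # principal_inertia_transform moves the centroid to the origin and puts the
    # principal inertia vectors on the X, Y, Z axes
    aligned.apply_transform(aligned.principal_inertia_transform)
    return aligned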
def visualize_results(labels, output_dir):
    """Visualize the segmentation results."""
    print("Generating visualizations...")
    # Create the visualization directory
    viz_dir = os.path.join(output_dir, "visualization")
    os.makedirs(viz_dir, exist_ok=True)
    # Label visualization of the middle slice
    mid_slice = labels.shape[0] // 2
    plt.figure(figsize=(12, 10))
    plt.imshow(labels[mid_slice], cmap='nipy_spectral')
    plt.colorbar(label='Grain label')
    plt.title(f'Grain segmentation of the middle slice (Z={mid_slice})')
    plt.savefig(os.path.join(viz_dir, 'segmentation_result.png'), dpi=300, bbox_inches='tight')
    plt.close()
    # Histogram of the grain volume distribution (volumes are recomputed here;
    # passing them in from main would avoid a second full scan)
    volumes = calculate_volumes(labels)
    vol_values = list(volumes.values())
    plt.figure(figsize=(10, 6))
    plt.hist(vol_values, bins=50, alpha=0.7, color='steelblue')
    plt.axvline(np.mean(vol_values), color='red', linestyle='dashed', linewidth=2, label=f'Mean volume: {np.mean(vol_values):.0f}')
    plt.xlabel('Volume (voxels)')
    plt.ylabel('Frequency')
    plt.title('Grain volume distribution')
    plt.legend()
    plt.savefig(os.path.join(viz_dir, 'volume_distribution.png'), dpi=300, bbox_inches='tight')
    plt.close()
def main():
    """Main entry point."""
    start_time = time.time()
    # Suppress noisy warnings
    warnings.filterwarnings("ignore", category=UserWarning)
    warnings.filterwarnings("ignore", category=RuntimeWarning)
    try:
        print("Starting processing...")
        with tqdm(total=5, desc="Overall progress") as pbar:
            # 1. Read the CT data
            ct_volume_gpu = read_tiff_sequence(input_path)
            pbar.update(1)
            # 2. Preprocess the CT data
            processed_volume_gpu = preprocess_ct_gpu(ct_volume_gpu)
            pbar.update(1)
            # 3. Segment the grains
            labels = segment_grains_gpu(processed_volume_gpu,
                                        min_distance=3,
                                        footprint_size=3)
            pbar.update(1)
            # 4. Extract and save the grains
            extract_and_save_grains_high_quality(labels, output_dir, voxel_size, target_count)
            pbar.update(1)
            # 5. Generate visualizations
            visualize_results(labels, output_dir)
            pbar.update(1)
        total_time = time.time() - start_time
        print(f"\nDone! Total time: {total_time/60:.2f} minutes")
    except Exception as e:
        print(f"Error during processing: {str(e)}")
        import traceback
        traceback.print_exc()
if __name__ == "__main__":
    main()