import os
import time
import torch
import torch.nn as nn
import torch.optim as optim
import pandas as pd
import numpy as np
from torch.utils.data import Dataset, DataLoader, random_split
from torchvision import transforms
from PIL import Image
from tqdm import tqdm
import matplotlib.pyplot as plt
os.system("") # 激活ANSI支持
torch.manual_seed(42)
np.random.seed(42)
# --------------------------
# 1. Dataset definition (unchanged)
# --------------------------
class FluidImageDataset(Dataset):
def __init__(self, blade_params, pressure_dir, velocity_u_dir, velocity_v_dir, velocity_w_dir, transform=None):
self.blade_params = blade_params
self.pressure_dir = pressure_dir
self.velocity_u_dir = velocity_u_dir
self.velocity_v_dir = velocity_v_dir
self.velocity_w_dir = velocity_w_dir
self.transform = transform or transforms.Compose([
transforms.Resize((256, 256)),
transforms.ToTensor(),
])
def __len__(self):
return len(self.blade_params)
def __getitem__(self, idx):
params = torch.tensor(self.blade_params.iloc[idx].values, dtype=torch.float32)
img_id = idx + 1
        # Simplified image loading: fall back to a zero image if a file is missing or unreadable
        def load_img(path):
            try:
                return self.transform(Image.open(path).convert('L'))
            except Exception:
                return torch.zeros(1, 256, 256)
return {'params': params, 'outputs': {
'pressure': load_img(os.path.join(self.pressure_dir, f"Pressure_{img_id}.jpg")),
'velocity_u': load_img(os.path.join(self.velocity_u_dir, f"Velocity_u_{img_id}.jpg")),
'velocity_v': load_img(os.path.join(self.velocity_v_dir, f"Velocity_v_{img_id}.jpg")),
'velocity_w': load_img(os.path.join(self.velocity_w_dir, f"Velocity_w_{img_id}.jpg")),
}}
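# Expected on-disk layout (as wired up in load_data below): each field directory holds one grayscale
# JPEG per sample, named by 1-based row index, e.g. Pressure/Pressure_1.jpg, Velocity_u/Velocity_u_1.jpg.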
# --------------------------
# 2. Fourier KAN layer (core change: Fourier basis functions replace the original nonlinearities)
# --------------------------
class FourierKANLayer(nn.Module):
def __init__(self, input_dim, output_dim, grid_size=16, add_bias=True, smooth_initialization=True):
super().__init__()
self.input_dim = input_dim
self.output_dim = output_dim
        self.grid_size = grid_size  # number of Fourier frequencies
        self.add_bias = add_bias
        # Fourier coefficient initialization (sine and cosine terms)
        # Smooth initialization: damp high-frequency coefficients so the initial function is smooth
        grid_norm = (torch.arange(grid_size) + 1) ** 2 if smooth_initialization else np.sqrt(grid_size)
self.fourier_coeffs = nn.Parameter(
torch.randn(2, output_dim, input_dim, grid_size) / (np.sqrt(input_dim) * grid_norm)
)
        # Bias term
if self.add_bias:
self.bias = nn.Parameter(torch.zeros(1, output_dim))
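    # Each output unit o evaluates a truncated Fourier series over every input x_i and sums the terms:
    #   y_o(x) = b_o + sum_i sum_{k=1..grid_size} (C[o, i, k] * cos(k * x_i) + S[o, i, k] * sin(k * x_i))
    # with C = fourier_coeffs[0] and S = fourier_coeffs[1].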
def forward(self, x):
        # Handle vector inputs (B, input_dim) or image inputs (B, C, H, W)
original_shape = x.shape
if x.dim() == 4:
            # Flatten image data to (B*H*W, C) for processing
batch_size, channels, height, width = x.shape
x = x.permute(0, 2, 3, 1).reshape(-1, channels) # (B*H*W, C)
        # Fourier basis evaluation: cos(kx) and sin(kx)
        k = torch.arange(1, self.grid_size + 1, device=x.device).reshape(1, 1, 1, self.grid_size)  # frequencies
x_expanded = x.unsqueeze(1).unsqueeze(3) # (B, 1, input_dim, 1)
cos_terms = torch.cos(k * x_expanded) # (B, 1, input_dim, grid_size)
sin_terms = torch.sin(k * x_expanded) # (B, 1, input_dim, grid_size)
        # Weighted sum over the Fourier coefficients
        y = torch.sum(cos_terms * self.fourier_coeffs[0:1], dim=(-2, -1))  # cosine contribution
        y += torch.sum(sin_terms * self.fourier_coeffs[1:2], dim=(-2, -1))  # sine contribution
        # Add the bias
if self.add_bias:
y += self.bias
        # Restore the image data shape
if len(original_shape) == 4:
y = y.reshape(batch_size, height, width, self.output_dim).permute(0, 3, 1, 2) # (B, output_dim, H, W)
else:
            y = y.reshape(original_shape[:-1] + (self.output_dim,))  # restore vector input shape
return y
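# Optional sanity check (illustrative sketch, not called anywhere in the pipeline): verifies that
# FourierKANLayer accepts both vector (B, input_dim) and image (B, C, H, W) inputs and preserves the
# expected output shapes. The sizes below are arbitrary test values.
def _check_fourier_kan_shapes():
    layer = FourierKANLayer(16, 16, grid_size=16)
    vec_out = layer(torch.randn(2, 16))        # vector input
    img_out = layer(torch.randn(2, 16, 8, 8))  # image input
    assert vec_out.shape == (2, 16)
    assert img_out.shape == (2, 16, 8, 8)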
class KANFeatureExtractor(nn.Module):
def __init__(self, input_dim):
super().__init__()
self.layers = nn.Sequential(
            FourierKANLayer(input_dim, 128, grid_size=16),  # Fourier KAN layer
nn.LayerNorm(128),
FourierKANLayer(128, 128, grid_size=16),
nn.LayerNorm(128),
FourierKANLayer(128, 256, grid_size=16),
nn.LayerNorm(256),
FourierKANLayer(256, 256, grid_size=16)
)
def forward(self, x):
return self.layers(x)
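# KANFeatureExtractor maps the blade-parameter vector (B, input_dim) to a (B, 256) feature vector;
# LightKANUNetModel.feature_proj below reshapes it into an 8-channel 16x16 map and upsamples to 256x256.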
# --------------------------
# 3. U-Net building blocks (Fourier KAN layers replace the original activation functions)
# --------------------------
class MiniUNetEncoder(nn.Module):
def __init__(self, in_channels=8):
super().__init__()
self.conv1 = nn.Sequential(
nn.Conv2d(in_channels, 16, kernel_size=3, padding=1),
nn.GroupNorm(4, 16),
            FourierKANLayer(16, 16, grid_size=16),  # Fourier KAN in place of ReLU
nn.Conv2d(16, 16, kernel_size=3, padding=1),
nn.GroupNorm(4, 16),
FourierKANLayer(16, 16, grid_size=16)
)
self.pool1 = nn.MaxPool2d(2, 2)
self.conv2 = nn.Sequential(
nn.Conv2d(16, 32, kernel_size=3, padding=1),
nn.GroupNorm(4, 32),
FourierKANLayer(32, 32, grid_size=16),
nn.Conv2d(32, 32, kernel_size=3, padding=1),
nn.GroupNorm(4, 32),
FourierKANLayer(32, 32, grid_size=16)
)
self.pool2 = nn.MaxPool2d(2, 2)
def forward(self, x):
c1 = self.conv1(x)
p1 = self.pool1(c1)
c2 = self.conv2(p1)
p2 = self.pool2(c2)
return c1, c2, p2
class MiniUNetDecoder(nn.Module):
def __init__(self, out_channels=1):
super().__init__()
self.up1 = nn.ConvTranspose2d(32, 16, kernel_size=2, stride=2)
self.conv3 = nn.Sequential(
nn.Conv2d(48, 16, kernel_size=3, padding=1),
nn.GroupNorm(4, 16),
            FourierKANLayer(16, 16, grid_size=16)  # Fourier KAN in place of ReLU
)
self.up2 = nn.ConvTranspose2d(16, 8, kernel_size=2, stride=2)
self.final_conv = nn.Conv2d(24, out_channels, kernel_size=1)
self.sigmoid = nn.Sigmoid()
def forward(self, c1, c2, p2):
u1 = self.up1(p2)
cat1 = torch.cat([u1, c2], dim=1)
d1 = self.conv3(cat1)
u2 = self.up2(d1)
cat2 = torch.cat([u2, c1], dim=1)
out = self.final_conv(cat2)
return self.sigmoid(out)
class MiniUNetPredictor(nn.Module):
def __init__(self):
super().__init__()
self.encoder = MiniUNetEncoder()
self.decoder = MiniUNetDecoder()
def forward(self, x):
c1, c2, p2 = self.encoder(x)
return self.decoder(c1, c2, p2)
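# Optional sanity check (illustrative sketch, not called by the pipeline): the mini U-Net maps an
# 8-channel feature map to a single-channel field of the same spatial size. A reduced 64x64 input
# keeps the check cheap; the actual pipeline feeds the 256x256 maps produced by feature_proj below.
def _check_mini_unet_shapes():
    predictor = MiniUNetPredictor()
    out = predictor(torch.randn(1, 8, 64, 64))
    assert out.shape == (1, 1, 64, 64)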
# --------------------------
# 4. Model assembly (structure unchanged, using Fourier KAN layers)
# --------------------------
class LightKANUNetModel(nn.Module):
def __init__(self, input_dim):
super().__init__()
self.kan_feature = KANFeatureExtractor(input_dim)
        # Feature projection module (with Fourier KAN)
self.feature_proj = nn.Sequential(
nn.Linear(256, 128),
nn.LayerNorm(128),
            FourierKANLayer(128, 128, grid_size=16),  # Fourier KAN in place of ReLU
            # Generate the initial feature map
nn.Linear(128, 8 * 16 * 16),
nn.Unflatten(1, (8, 16, 16)),
            # Upsampling convolution blocks
nn.Conv2d(8, 16, kernel_size=3, padding=1),
nn.GroupNorm(4, 16),
FourierKANLayer(16, 16, grid_size=16),
nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True),
nn.Conv2d(16, 32, kernel_size=3, padding=1),
nn.GroupNorm(4, 32),
FourierKANLayer(32, 32, grid_size=16),
nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True),
nn.Conv2d(32, 64, kernel_size=3, padding=1),
nn.GroupNorm(4, 64),
FourierKANLayer(64, 64, grid_size=16),
nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True),
nn.Conv2d(64, 32, kernel_size=3, padding=1),
nn.GroupNorm(4, 32),
FourierKANLayer(32, 32, grid_size=16),
nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True),
nn.Conv2d(32, 8, kernel_size=3, padding=1),
nn.GroupNorm(4, 8),
FourierKANLayer(8, 8, grid_size=16)
)
self.predictors = nn.ModuleDict({
'pressure': MiniUNetPredictor(),
'velocity_u': MiniUNetPredictor(),
'velocity_v': MiniUNetPredictor(),
'velocity_w': MiniUNetPredictor()
})
def forward(self, x):
features = self.kan_feature(x)
feat_map = self.feature_proj(features)
return {k: self.predictors[k](feat_map) for k in self.predictors}
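# Data flow for 256x256 targets: params (B, input_dim) -> KANFeatureExtractor -> (B, 256)
# -> feature_proj -> (B, 8, 256, 256) -> one MiniUNetPredictor per field -> (B, 1, 256, 256),
# returned as a dict keyed 'pressure', 'velocity_u', 'velocity_v', 'velocity_w'.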
# --------------------------
# 5. Training and utility functions (unchanged)
# --------------------------
def load_data(blade_params_path, data_dir):
blade_params = pd.read_excel(blade_params_path).iloc[:, 1:]
print(f"叶片参数形状:{blade_params.shape}(样本数×参数数)")
dataset = FluidImageDataset(
blade_params,
os.path.join(data_dir, "Pressure"),
os.path.join(data_dir, "Velocity_u"),
os.path.join(data_dir, "Velocity_v"),
os.path.join(data_dir, "Velocity_w")
)
total_size = len(dataset)
train_size = int(0.8 * total_size)
val_size = int(0.1 * total_size)
test_size = total_size - train_size - val_size
generator = torch.Generator().manual_seed(42)
train_dataset, val_dataset, test_dataset = random_split(
dataset, [train_size, val_size, test_size], generator=generator
)
return (
DataLoader(train_dataset, batch_size=1, shuffle=True, num_workers=0),
DataLoader(val_dataset, batch_size=1, shuffle=False, num_workers=0),
DataLoader(test_dataset, batch_size=1, shuffle=False, num_workers=0),
blade_params.shape[1]
)
def train_model(model, train_loader, criterion, optimizer, device, epochs=3):
    model.train()
    # GradScaler avoids fp16 gradient underflow when autocast is enabled on CUDA
    scaler = torch.amp.GradScaler('cuda', enabled=torch.cuda.is_available())
    if torch.cuda.is_available():
        torch.cuda.reset_peak_memory_stats()
        print(f"Initial GPU memory: {torch.cuda.memory_allocated() / 1e6:.1f} MB")
for epoch in range(epochs):
running_loss = 0.0
pbar = tqdm(train_loader, total=len(train_loader), desc=f"Epoch {epoch + 1}/{epochs}")
for batch_idx, data in enumerate(pbar):
start_time = time.time()
params = data['params'].to(device)
true_outputs = {k: v.to(device) for k, v in data['outputs'].items()}
            optimizer.zero_grad()
            with torch.amp.autocast('cuda', enabled=torch.cuda.is_available()):
                pred_outputs = model(params)
                loss = sum(criterion(pred_outputs[k], true_outputs[k]) for k in pred_outputs)
            # Scale the loss for mixed-precision backprop, then step the optimizer and update the scaler
            scaler.scale(loss).backward()
            scaler.step(optimizer)
            scaler.update()
            running_loss += loss.item()
batch_time = time.time() - start_time
if (batch_idx + 1) % 10 == 0 or batch_idx == len(train_loader) - 1:
avg_loss = running_loss / (batch_idx + 1)
gpu_mem = torch.cuda.memory_allocated() / 1e6 if torch.cuda.is_available() else 0
pbar.set_postfix({
"批损失": f"{loss.item():.4f}",
"平均损失": f"{avg_loss:.4f}",
"耗时": f"{batch_time:.2f}s",
"GPU内存": f"{gpu_mem:.1f} MB"
})
print(f"Epoch {epoch + 1} 完成,平均损失:{running_loss / len(train_loader):.4f}")
return model
def evaluate_model(model, test_loader, criterion, device):
model.eval()
metrics = {k: [] for k in ['pressure', 'velocity_u', 'velocity_v', 'velocity_w']}
with torch.no_grad():
for data in test_loader:
params = data['params'].to(device)
true_outputs = {k: v.to(device) for k, v in data['outputs'].items()}
pred_outputs = model(params)
for k in metrics:
metrics[k].append(criterion(pred_outputs[k], true_outputs[k]).item())
avg = {k: sum(v) / len(v) for k, v in metrics.items()}
print("\n测试集结果:")
print(f"总平均MSE:{sum(avg.values()) / 4:.4f}")
for k, v in avg.items():
print(f" {k}:{v:.6f}")
return avg
def visualize_predictions(model, test_loader, device, num_samples=3):
model.eval()
save_dir = "KAN_UNet_Results"
os.makedirs(save_dir, exist_ok=True)
with torch.no_grad():
for i, data in enumerate(test_loader):
if i >= num_samples:
break
params = data['params'].to(device)
true = data['outputs']
pred = {k: v.cpu() for k, v in model(params).items()}
fig, axes = plt.subplots(1, 9, figsize=(24, 5))
axes[0].text(0.1, 0.5, "参数:\n" + "\n".join(
[f"p{j}: {v:.2f}" for j, v in enumerate(params[0].cpu().numpy().round(2))]),
fontsize=9, va='center')
axes[0].axis('off')
for col, k in enumerate(['pressure', 'velocity_u', 'velocity_v', 'velocity_w']):
axes[2 * col + 1].imshow(true[k][0].squeeze(), cmap='viridis')
                axes[2 * col + 1].set_title(f'True {k}')
axes[2 * col + 1].axis('off')
axes[2 * col + 2].imshow(pred[k][0].squeeze(), cmap='viridis')
                axes[2 * col + 2].set_title(f'Predicted {k}')
axes[2 * col + 2].axis('off')
plt.tight_layout()
plt.savefig(f"{save_dir}/样本_{i + 1}.png", dpi=200)
plt.close()
print(f"已保存样本 {i + 1} 可视化结果")
def main():
save_dir = "KAN_UNet_Results"
blade_params_path = r"C:\Users\ZYQ\PycharmProjects\2025.07.09\Data\Blade parameters.xlsx"
data_dir = r"C:\Users\ZYQ\PycharmProjects\2025.07.09\Data"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"使用设备:{device}")
train_loader, val_loader, test_loader, input_dim = load_data(blade_params_path, data_dir)
print(f"输入维度:{input_dim},训练批次:{len(train_loader)}")
model = LightKANUNetModel(input_dim).to(device)
total_params = sum(p.numel() for p in model.parameters())
print(f"模型参数量:{total_params / 1e6:.2f} M")
criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=1e-4, weight_decay=1e-5)
print("\n开始训练...")
model = train_model(model, train_loader, criterion, optimizer, device, epochs=3)
evaluate_model(model, test_loader, criterion, device)
visualize_predictions(model, test_loader, device)
torch.save(model.state_dict(), f"{save_dir}/fourier_kan_unet.pth")
print(f"模型保存至:{save_dir}/fourier_kan_unet.pth")
if __name__ == "__main__":
main()