import torch
import torch.nn as nn
import torch.nn.functional as F
# -----------------------------
# 1. 频域卷积块 (增强频率特征表示)
# -----------------------------
class FreqConvBlock(nn.Module):
    """Two-layer 1-D convolution stack (Conv-BN-GELU x2) along the point axis."""

    def __init__(self, in_ch, out_ch):
        super().__init__()
        layers = [
            nn.Conv1d(in_ch, out_ch, kernel_size=3, padding=1),
            nn.BatchNorm1d(out_ch),
            nn.GELU(),
            nn.Conv1d(out_ch, out_ch, kernel_size=3, padding=1),
            nn.BatchNorm1d(out_ch),
            nn.GELU(),
        ]
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        """x: [B, N, C] -> [B, N, C]; the convolution runs over the N axis."""
        channels_first = x.transpose(1, 2)  # [B, C, N] layout for Conv1d
        out = self.net(channels_first)
        return out.transpose(1, 2)
# -----------------------------
# 2. 频域通道注意力 (FCA)
# -----------------------------
class FreqChannelAttention(nn.Module):
    """Squeeze-and-excitation style channel gating for [B, N, C] features."""

    def __init__(self, dim, reduction=8):
        super().__init__()
        self.fc1 = nn.Linear(dim, dim // reduction)
        self.fc2 = nn.Linear(dim // reduction, dim)
        self.act = nn.GELU()
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        """Scale every channel of x ([B, N, C]) by a learned gate in (0, 1)."""
        squeezed = x.mean(dim=1)                    # global average pool over points
        excited = self.fc2(self.act(self.fc1(squeezed)))
        gate = self.sigmoid(excited).unsqueeze(1)   # [B, 1, C], broadcast over N
        return x * gate
# -----------------------------
# 3. 多头自注意力块
# -----------------------------
class TransformerBlock(nn.Module):
    """Pre-norm transformer encoder block: self-attention + MLP, each residual.

    Args:
        dim: embedding dimension.
        num_heads: number of attention heads (must divide dim).
        mlp_ratio: hidden-width multiplier of the feed-forward MLP.
        drop: dropout probability used inside attention and the MLP.
    """
    def __init__(self, dim, num_heads, mlp_ratio=4.0, drop=0.1):
        super().__init__()
        self.norm1 = nn.LayerNorm(dim)
        self.attn = nn.MultiheadAttention(dim, num_heads, dropout=drop, batch_first=True)
        self.norm2 = nn.LayerNorm(dim)
        self.mlp = nn.Sequential(
            nn.Linear(dim, int(dim * mlp_ratio)),
            nn.GELU(),
            nn.Dropout(drop),
            nn.Linear(int(dim * mlp_ratio), dim),
            nn.Dropout(drop)
        )

    def forward(self, x):
        # Attention sub-layer: x = x + Attn(LN(x)).
        h = x
        x = self.norm1(x)
        x, _ = self.attn(x, x, x)
        x = x + h
        # MLP sub-layer: x = x + MLP(LN(x)).
        # Bug fix: the residual must come from the pre-norm tensor `h`;
        # the original added the MLP output to the *normalized* tensor and
        # left `h` unused, breaking the identity path of the residual.
        h = x
        x = self.norm2(x)
        x = h + self.mlp(x)
        return x
# -----------------------------
# 4. 主网络结构:FreqFormerV7
# -----------------------------
class FreqFormerV7(nn.Module):
    """Point-cloud segmentation backbone fusing spatial and FFT-domain features.

    Forward input: coords [B, N, 3] plus optional feats [B, N, 3]
    (RGB or normals); output: per-point logits [B, N, num_classes].
    """
    def __init__(self, num_classes=13, embed_dim=192, depth=4, num_heads=6):
        super().__init__()
        # Spatial embedding of the raw 6-dim input (xyz + RGB or normal).
        self.spatial_embed = nn.Linear(6, embed_dim)
        # Frequency branch: FFT real+imag parts (2 * 6 = 12 dims) -> embed_dim.
        self.freq_proj = nn.Linear(12, embed_dim)
        self.freq_conv = FreqConvBlock(embed_dim, embed_dim)
        self.fca = FreqChannelAttention(embed_dim)
        # Gated fusion: input is the concatenation of the two branches.
        self.fuse_gate = nn.Sequential(
            nn.Linear(2 * embed_dim, embed_dim),
            nn.ReLU(),
            nn.Linear(embed_dim, embed_dim)
        )
        # Transformer trunk.
        self.blocks = nn.ModuleList([
            TransformerBlock(embed_dim, num_heads=num_heads)
            for _ in range(depth)
        ])
        # Per-point classification head.
        self.cls_head = nn.Sequential(
            nn.LayerNorm(embed_dim),
            nn.Dropout(0.2),
            nn.Linear(embed_dim, num_classes)
        )
        self._init_weights()

    def _init_weights(self):
        """Xavier-uniform init for every Linear layer; zero the biases."""
        for m in self.modules():
            if isinstance(m, nn.Linear):
                nn.init.xavier_uniform_(m.weight)
                if m.bias is not None:
                    nn.init.zeros_(m.bias)

    def forward(self, coords, feats=None):
        """coords: [B, N, 3]; feats: [B, N, 3] or None -> logits [B, N, C]."""
        # Zero-pad missing features so the input is always 6-dim
        # (deduplicated: the original repeated the concatenation in both branches).
        if feats is None:
            feats = torch.zeros_like(coords)
        x = torch.cat([coords, feats], dim=-1)  # [B, N, 6]
        # --- Spatial branch ---
        spatial_feat = self.spatial_embed(x)  # [B, N, embed_dim]
        # --- Frequency branch: FFT over the point dimension ---
        fft_feat_complex = torch.fft.fft(x, dim=1)
        # Split into real/imaginary parts and project to the embedding dim.
        real_part = fft_feat_complex.real
        imag_part = fft_feat_complex.imag
        freq_feat = self.freq_proj(torch.cat([real_part, imag_part], dim=-1))
        # Frequency-domain convolution, then channel attention.
        freq_feat = self.freq_conv(freq_feat)
        freq_feat = self.fca(freq_feat)
        # --- Gated fusion: per-channel convex blend of the two branches ---
        gate_input = torch.cat([spatial_feat, freq_feat], dim=-1)
        gate = torch.sigmoid(self.fuse_gate(gate_input))  # [B, N, embed_dim]
        fused = gate * spatial_feat + (1 - gate) * freq_feat
        # --- Transformer trunk ---
        for blk in self.blocks:
            fused = blk(fused)
        # --- Classification head ---
        return self.cls_head(fused)
if __name__ == "__main__":
    # Smoke test: two clouds of 1024 points with RGB features.
    model = FreqFormerV7(num_classes=13)
    x = torch.randn(2, 1024, 3)
    rgb = torch.randn(2, 1024, 3)
    y = model(x, rgb)
    print("Output:", y.shape)  # expected: torch.Size([2, 1024, 13])
"""
Train script for FreqFormerV6
usage example:
python train_freqformer_v6.py --data_dir <…> --batch_size 4 --num_epochs 150
"""
import os, time, argparse
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
import torch.nn.functional as F
from torch.optim.lr_scheduler import SequentialLR, LinearLR, CosineAnnealingLR
# -------------------------------
# Import your V6 model (adjust path if needed)
from freqformer_v7 import FreqFormerV7
# -------------------------------
# Dice loss (multi-class)
def one_hot(labels, num_classes):
    """Float one-hot encoding.

    labels: [N] integer tensor -> returns [N, C] float tensor on labels' device.
    """
    identity = torch.eye(num_classes, device=labels.device)
    return identity[labels]
def multiclass_dice_loss(probs, labels, eps=1e-6):
    """Soft multi-class Dice loss averaged over classes.

    probs: [BP, C] softmax probabilities; labels: [BP] ints (negatives ignored).
    Returns 0 when every label is negative.
    """
    num_classes = probs.shape[1]
    valid = labels >= 0
    if valid.sum() == 0:
        # Nothing to score against.
        return probs.new_tensor(0.)
    p = probs[valid]                                    # [M, C]
    kept = labels[valid]
    target = torch.eye(num_classes, device=kept.device)[kept]  # [M, C] one-hot
    inter = (p * target).sum(dim=0)
    denom = p.sum(dim=0) + target.sum(dim=0)
    per_class_dice = (2. * inter + eps) / (denom + eps)
    return (1.0 - per_class_dice).mean()
# -------------------------------
# Focal Loss
class FocalLoss(nn.Module):
    """Focal loss on logits: (1 - p_t)^gamma * CE, with optional class weights."""

    def __init__(self, alpha=None, gamma=2.0, reduction='mean'):
        super().__init__()
        self.alpha = alpha          # per-class weights forwarded to cross_entropy
        self.gamma = gamma          # focusing parameter
        self.reduction = reduction  # 'mean', 'sum', or anything else for raw

    def forward(self, inputs, targets):
        per_sample_ce = F.cross_entropy(inputs, targets, reduction='none', weight=self.alpha)
        prob_true = torch.exp(-per_sample_ce)  # probability of the target class
        loss = ((1 - prob_true) ** self.gamma) * per_sample_ce
        if self.reduction == 'sum':
            return loss.sum()
        if self.reduction == 'mean':
            return loss.mean()
        return loss
# -------------------------------
# Lovasz softmax
def lovasz_grad(gt_sorted):
    """Gradient of the Lovasz extension w.r.t. sorted errors (Berman et al.).

    gt_sorted: binary ground-truth vector sorted by descending error.
    """
    total_fg = gt_sorted.sum()
    if total_fg == 0:
        # No foreground: the extension is constant, so the gradient vanishes.
        return torch.zeros_like(gt_sorted)
    cum_fg = gt_sorted.cumsum(0)
    cum_bg = (1 - gt_sorted).cumsum(0)
    jaccard = 1. - (total_fg - cum_fg) / (total_fg + cum_bg)
    if gt_sorted.numel() > 1:
        # First differences turn cumulative Jaccard into per-position gradient.
        jaccard[1:] = jaccard[1:] - jaccard[:-1]
    return jaccard
def flatten_probas(probas, labels, ignore_index=-1):
    """Drop points whose label equals `ignore_index`.

    Args:
        probas: [P, C] per-point class probabilities.
        labels: [P] integer labels.
    Returns:
        (filtered probas, filtered labels). When everything is ignored,
        returns empty tensors (downstream code checks `numel() == 0`).
        Fix: the original used the deprecated `Tensor.new(0)`, which yields
        an *uninitialized* tensor of unclear shape; use explicit empties.
    """
    mask = (labels != ignore_index)
    if not mask.any():
        return probas.new_zeros((0, probas.size(1))), labels.new_zeros((0,))
    return probas[mask], labels[mask]
def lovasz_softmax(probas, labels, classes='present', ignore_index=-1):
    """Multi-class Lovasz-Softmax loss averaged over (present) classes.

    probas: [P, C] probabilities; labels: [P] ints; points labeled
    `ignore_index` are excluded. Returns 0 when no valid points remain.
    """
    num_classes = probas.size(1)
    class_losses = []
    probas, labels = flatten_probas(probas, labels, ignore_index)
    if probas.numel() == 0:
        # Every point was ignored.
        return probas.new_tensor(0.)
    for c in range(num_classes):
        fg = (labels == c).float()
        if classes == 'present' and fg.sum() == 0:
            continue  # skip classes absent from this batch
        errors = (fg - probas[:, c]).abs()
        order = torch.argsort(errors, descending=True)
        grad = lovasz_grad(fg[order])
        class_losses.append(torch.dot(F.relu(errors[order]), grad))
    if not class_losses:
        return probas.new_tensor(0.)
    return sum(class_losses) / len(class_losses)
# -------------------------------
# Dataset (S3DIS npy layout assumed)
class S3DISDatasetAug(Dataset):
    """S3DIS-style point-cloud dataset over per-room .npy files.

    Each file is assumed to be an [N, 7] float array: columns 0-2 are xyz,
    columns 3-5 extra features (presumably RGB — confirm against the
    preprocessing), column 6 the integer class label.
    Train split applies rare-class oversampling and geometric augmentation.
    """
    def __init__(self, data_dir, split='train', val_area='Area_5', num_points=1024, augment=True):
        # Fixed number of points sampled per item.
        self.num_points = num_points
        # Augmentation is only ever active on the training split.
        self.augment = augment and (split == 'train')
        self.files = []
        self.rare_classes = [3,4,6,9,11]  # must be initialized here (read by __getitem__)
        # Split files by whether their name contains the held-out area.
        for f in sorted(os.listdir(data_dir)):
            if not f.endswith('.npy'):
                continue
            if split == 'train' and val_area in f:
                continue
            if split == 'val' and val_area not in f:
                continue
            self.files.append(os.path.join(data_dir, f))
        if len(self.files) == 0:
            raise RuntimeError(f"No files found in {data_dir} (split={split})")
        # Build a file -> present-classes cache used for rare-class oversampling.
        self.file_class_map = {}
        for file_path in self.files:
            # Memory-mapped load to avoid reading whole files into RAM.
            data = np.load(file_path, mmap_mode='r')
            # Subsample labels (every 100th point) to keep this scan cheap.
            # NOTE(review): sparse sampling can miss a rare class present in
            # the file, so the cache may under-report rare files — confirm.
            labels_sample = data[::100, 6]
            unique_classes = np.unique(labels_sample).tolist()
            self.file_class_map[file_path] = unique_classes
    def __len__(self):
        # One item per room file.
        return len(self.files)
    def __getitem__(self, idx):
        # Rare-class oversampling: with probability 0.3 (train only) ignore
        # `idx` and load a random file known to contain a rare class.
        # NOTE(review): this decouples the returned sample from `idx`, so an
        # "epoch" no longer visits every room exactly once — confirm intended.
        if self.augment and np.random.rand() < 0.3:
            # Use the cache built in __init__ instead of re-loading files here.
            rare_files = [f for f in self.files if any(
                c in self.file_class_map[f] for c in self.rare_classes  # cached lookup
            )]
            if rare_files:  # guard against an empty candidate list
                file_path = np.random.choice(rare_files)
                return self._load_file(file_path)  # load the oversampled file
            # No rare files found: fall through to the normal path.
        # Default: load the file at the requested index.
        return self._load_file(self.files[idx])
    # Helper: load one room file, sample points, and (optionally) augment.
    def _load_file(self, file_path):
        data = np.load(file_path)
        coords = data[:, :3].astype(np.float32)
        extra = data[:, 3:6].astype(np.float32)
        labels = data[:, 6].astype(np.int64)
        N = coords.shape[0]
        # Sample exactly num_points; with replacement only when the room is small.
        if N >= self.num_points:
            choice = np.random.choice(N, self.num_points, replace=False)
        else:
            choice = np.random.choice(N, self.num_points, replace=True)
        coords = coords[choice]
        extra = extra[choice]
        labels = labels[choice]
        if self.augment:
            # Random rotation about z, isotropic scale in [0.9, 1.1], Gaussian jitter.
            theta = np.random.uniform(0, 2*np.pi)
            c, s = np.cos(theta), np.sin(theta)
            R = np.array([[c, -s, 0], [s, c, 0], [0,0,1]], dtype=np.float32)
            coords = coords.dot(R.T)
            scale = np.random.uniform(0.9,1.1)
            coords = coords * scale
            coords = coords + np.random.normal(0, 0.01, coords.shape).astype(np.float32)
        # local_feat = augmented xyz concatenated with the (un-augmented) extras.
        local_feat = np.concatenate([coords, extra], axis=1)
        return {
            'coords': torch.from_numpy(coords).float(),
            'extra': torch.from_numpy(extra).float(),
            'local_feat': torch.from_numpy(local_feat).float(),
            'label': torch.from_numpy(labels).long()
        }
# -------------------------------
# Confusion matrix & IoU
def compute_confusion_matrix(preds, gts, num_classes):
    """Confusion matrix [C, C] with rows = ground truth, cols = prediction.

    Points whose ground truth is outside [0, num_classes) are discarded.
    """
    valid = (gts >= 0) & (gts < num_classes)
    gt = gts[valid].astype(np.int64)
    pred = preds[valid].astype(np.int64)
    # Encode (gt, pred) pairs as a single index and histogram them.
    flat = np.bincount(num_classes * gt + pred, minlength=num_classes ** 2)
    return flat.reshape(num_classes, num_classes)
def compute_iou_from_conf(conf):
    """Per-class IoU from a confusion matrix; epsilon guards empty classes."""
    tp = np.diag(conf)
    # union = gt total + predicted total - true positives, per class.
    union = conf.sum(axis=1) + conf.sum(axis=0) - tp
    return tp / (union + 1e-10)
# -------------------------------
# Class weights
def compute_class_weights(file_list, num_classes, method='inv_sqrt'):
    """Per-class weights from label frequencies across .npy files (label = col 6).

    method: 'inv_freq' -> 1/count, 'inv_sqrt' -> 1/sqrt(count),
    anything else -> uniform. Weights are normalized to sum to num_classes.
    Returns a float32 torch tensor of length num_classes.
    """
    counts = np.zeros(num_classes, dtype=np.float64)
    for path in file_list:
        labels = np.load(path, mmap_mode='r')[:, 6].astype(np.int64)
        for cls in range(num_classes):
            counts[cls] += (labels == cls).sum()
    counts = np.maximum(counts, 1.0)  # avoid division by zero for absent classes
    if method == 'inv_freq':
        weights = 1.0 / counts
    elif method == 'inv_sqrt':
        weights = 1.0 / np.sqrt(counts)
    else:
        weights = np.ones_like(counts)
    weights *= num_classes / weights.sum()
    return torch.from_numpy(weights.astype(np.float32))
# -------------------------------
# Main training loop
def main():
    """CLI entry point: train FreqFormerV7 on S3DIS-style .npy rooms.

    Loss = 0.5 * CE (or focal) + 2.0 * Dice + 0.3 * Lovasz (optional).
    Validates every 5 epochs (and on the last), tracks the best mIoU,
    and saves both best and final checkpoints under --save_dir.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_dir', default='/root/autodl-tmp/pointcloud_seg/data/S3DIS_new/processed_npy')
    parser.add_argument('--save_dir', default='./checkpoints_v6')
    parser.add_argument('--batch_size', type=int, default=4)
    parser.add_argument('--num_epochs', type=int, default=300)
    parser.add_argument('--num_points', type=int, default=1024)
    parser.add_argument('--num_classes', type=int, default=13)
    parser.add_argument('--lr', type=float, default=1e-3)
    parser.add_argument('--device', default='cuda' if torch.cuda.is_available() else 'cpu')
    parser.add_argument('--use_class_weights', action='store_true')
    parser.add_argument('--use_lovasz', action='store_true')
    parser.add_argument('--warmup_epochs', type=int, default=5)
    parser.add_argument('--num_workers', type=int, default=8)
    parser.add_argument('--grad_clip', type=float, default=1.0)
    parser.add_argument('--use_focal', action='store_true', help='Use Focal Loss instead of CrossEntropy')
    parser.add_argument('--focal_gamma', type=float, default=2.0, help='Gamma parameter for Focal Loss')
    args = parser.parse_args()
    os.makedirs(args.save_dir, exist_ok=True)
    device = torch.device(args.device)
    # Datasets: augmentation only on the train split; validation uses batch size 1.
    train_ds = S3DISDatasetAug(args.data_dir, split='train', num_points=args.num_points, augment=True)
    val_ds = S3DISDatasetAug(args.data_dir, split='val', num_points=args.num_points, augment=False)
    train_loader = DataLoader(train_ds, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers, drop_last=True)
    val_loader = DataLoader(val_ds, batch_size=1, shuffle=False, num_workers=max(1, args.num_workers//2))
    # Optional class weights derived from label frequencies of the train files.
    class_weights = None
    if args.use_class_weights:
        print("Computing class weights...")
        class_weights = compute_class_weights(train_ds.files, args.num_classes).to(device)
        print("class weights:", class_weights.cpu().numpy())
    # Model (wrapped in DataParallel when several GPUs are visible).
    model = FreqFormerV7(num_classes=args.num_classes)
    if torch.cuda.device_count() > 1 and args.device.startswith('cuda'):
        print("Using DataParallel on devices:", list(range(torch.cuda.device_count())))
        model = torch.nn.DataParallel(model)
    model = model.to(device)
    # Focal loss setup: rare classes get a 25x boost in alpha.
    # NOTE(review): when class_weights is set, `alpha` aliases it, so the
    # in-place `*= 25.0` below also scales the CE weights used for the
    # validation loss — confirm this coupling is intended.
    focal_criterion = None
    if args.use_focal:
        print(f"Using Focal Loss with gamma={args.focal_gamma}")
        alpha = class_weights if class_weights is not None else torch.ones(args.num_classes).to(device)
        rare_classes = [3,4,6,9,11]  # adjust as needed
        for c in rare_classes:
            if c < len(alpha):
                alpha[c] *= 25.0
        focal_criterion = FocalLoss(alpha=alpha, gamma=args.focal_gamma).to(device)
    # Optimizer + combined schedule: linear warmup followed by cosine annealing.
    optimizer = torch.optim.AdamW(
        model.parameters(),
        lr=args.lr,
        weight_decay=1e-4
    )
    # Warmup scheduler (epoch-based: scheduler.step() is called once per epoch).
    warmup = torch.optim.lr_scheduler.LinearLR(
        optimizer,
        start_factor=0.01,
        end_factor=1.0,
        total_iters=args.warmup_epochs
    )
    # Cosine annealing over the remaining epochs.
    cosine = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer,
        T_max=max(1, args.num_epochs - args.warmup_epochs)
    )
    # Chain the two phases at the warmup boundary.
    scheduler = torch.optim.lr_scheduler.SequentialLR(
        optimizer,
        schedulers=[warmup, cosine],
        milestones=[args.warmup_epochs]
    )
    best_miou = 0.0
    # Training loop
    for epoch in range(args.num_epochs):
        model.train()
        t0 = time.time()
        running_loss = 0.0
        for batch in train_loader:
            coords = batch['coords'].to(device)
            extra = batch['extra'].to(device)
            labels = batch['label'].to(device)
            optimizer.zero_grad()
            logits = model(coords, extra)  # [B, N, C] per-point logits
            B,N,C = logits.shape
            logits_flat = logits.view(-1,C)
            labels_flat = labels.view(-1)
            probs = F.softmax(logits_flat, dim=-1)
            dice = multiclass_dice_loss(probs, labels_flat)
            if args.use_focal and focal_criterion is not None:
                ce = focal_criterion(logits_flat, labels_flat)
            else:
                ce = F.cross_entropy(logits_flat, labels_flat, weight=class_weights, ignore_index=-1)
            lov = lovasz_softmax(probs, labels_flat) if args.use_lovasz else logits_flat.new_tensor(0.0)
            # Fixed mixing weights: 0.5 CE + 2.0 Dice + 0.3 Lovasz.
            loss = 0.5*ce + 2.0*dice + 0.3*lov
            loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip)
            optimizer.step()
            running_loss += loss.item()
        scheduler.step()  # one LR step per epoch (matches T_max/total_iters in epochs)
        avg_loss = running_loss / max(1, len(train_loader))
        t1 = time.time()
        print(f"Epoch {epoch+1}/{args.num_epochs} TrainLoss: {avg_loss:.4f} Time: {(t1-t0):.1f}s LR: {optimizer.param_groups[0]['lr']:.6f}")
        # Validation every 5 epochs and on the final epoch.
        if (epoch+1)%5==0 or (epoch+1)==args.num_epochs:
            model.eval()
            conf = np.zeros((args.num_classes, args.num_classes), dtype=np.int64)
            tot_loss = 0.0
            cnt = 0
            with torch.no_grad():
                for batch in val_loader:
                    coords = batch['coords'].to(device)
                    extra = batch['extra'].to(device)
                    labels = batch['label'].to(device)
                    logits = model(coords, extra)
                    B,N,C = logits.shape
                    logits_flat = logits.view(-1,C)
                    labels_flat = labels.view(-1)
                    # Validation always uses plain (optionally weighted) CE, never focal.
                    if class_weights is not None:
                        loss_ce = F.cross_entropy(logits_flat, labels_flat, weight=class_weights, ignore_index=-1)
                    else:
                        loss_ce = F.cross_entropy(logits_flat, labels_flat, ignore_index=-1)
                    probs = F.softmax(logits_flat, dim=-1)
                    dice_val = multiclass_dice_loss(probs, labels_flat)
                    lov_val = lovasz_softmax(probs, labels_flat) if args.use_lovasz else logits_flat.new_tensor(0.0)
                    loss_val = 0.5*loss_ce + 2.0*dice_val + 0.3*lov_val
                    tot_loss += loss_val.item()
                    preds = logits.argmax(dim=-1).cpu().numpy().reshape(-1)
                    gts = labels.cpu().numpy().reshape(-1)
                    conf += compute_confusion_matrix(preds, gts, args.num_classes)
                    cnt += 1
            mean_loss = tot_loss / max(1, cnt)
            iou = compute_iou_from_conf(conf)
            miou = np.nanmean(iou)
            oa = np.diag(conf).sum() / (conf.sum() + 1e-12)  # overall accuracy
            print(f"-- Validation Loss: {mean_loss:.4f} mIoU: {miou:.4f} OA: {oa:.4f}")
            print("Per-class IoU:")
            for cid, v in enumerate(iou):
                print(f" class {cid:02d}: {v:.4f}")
            # Checkpoint whenever validation mIoU improves.
            if miou > best_miou:
                best_miou = miou
                path = os.path.join(args.save_dir, f'best_epoch_{epoch+1:03d}miou.pth')
                state = {
                    'epoch': epoch+1,
                    'best_miou': best_miou,
                    'model_state_dict': model.module.state_dict() if isinstance(model, torch.nn.DataParallel) else model.state_dict()
                }
                torch.save(state, path)
                print("Saved best:", path)
    # Final save after the last epoch (unwrap DataParallel if used).
    final_path = os.path.join(args.save_dir, f'final_epoch_{args.num_epochs:03d}miou.pth')
    state = {
        'epoch': args.num_epochs,
        'best_miou': best_miou,
        'model_state_dict': model.module.state_dict() if isinstance(model, torch.nn.DataParallel) else model.state_dict()
    }
    torch.save(state, final_path)
    print("Training finished. Final saved to:", final_path)
if __name__ == "__main__":
    # Entry point. (Fix: a pasted training log previously followed `main()`
    # on the same line, making the file a syntax error.)
    main()
Epoch 192/300 TrainLoss: 1.0843 Time: 4.0s LR: 0.000296
Epoch 193/300 TrainLoss: 1.0722 Time: 3.5s LR: 0.000291
Epoch 194/300 TrainLoss: 1.0783 Time: 3.8s LR: 0.000286
Epoch 195/300 TrainLoss: 1.0453 Time: 4.0s LR: 0.000281
-- Validation Loss: 2.0848 mIoU: 0.2943 OA: 0.7064
Per-class IoU:
class 00: 0.8757
class 01: 0.9414
class 02: 0.5905
class 03: 0.0000
class 04: 0.0043
class 05: 0.0703
class 06: 0.0104
class 07: 0.4147
class 08: 0.2735
class 09: 0.0155
class 10: 0.3772
class 11: 0.0155
class 12: 0.2371
Epoch 196/300 TrainLoss: 1.0407 Time: 3.8s LR: 0.000277
Epoch 197/300 TrainLoss: 1.0494 Time: 3.7s LR: 0.000272
Epoch 198/300 TrainLoss: 1.0328 Time: 4.4s LR: 0.000267
Epoch 199/300 TrainLoss: 1.0615 Time: 4.1s LR: 0.000262
Epoch 200/300 TrainLoss: 1.0648 Time: 4.7s LR: 0.000258
-- Validation Loss: 2.0971 mIoU: 0.2926 OA: 0.7016
Per-class IoU:
class 00: 0.8730
class 01: 0.9493
class 02: 0.5920
class 03: 0.0000
class 04: 0.0060
class 05: 0.0775
class 06: 0.0137
class 07: 0.4216
class 08: 0.2597
class 09: 0.0171
class 10: 0.3558
class 11: 0.0249
class 12: 0.2134
Epoch 201/300 TrainLoss: 1.0585 Time: 4.1s LR: 0.000253
Epoch 202/300 TrainLoss: 1.0568 Time: 3.9s LR: 0.000248
Epoch 203/300 TrainLoss: 1.0696 Time: 4.4s LR: 0.000244
Epoch 204/300 TrainLoss: 1.0561 Time: 3.9s LR: 0.000239
Epoch 205/300 TrainLoss: 1.0646 Time: 4.6s LR: 0.000235
-- Validation Loss: 2.1047 mIoU: 0.2952 OA: 0.7027
Per-class IoU:
class 00: 0.8765
class 01: 0.9540
class 02: 0.5869
class 03: 0.0000
class 04: 0.0017
class 05: 0.0714
class 06: 0.0129
class 07: 0.4279
class 08: 0.2747
class 09: 0.0217
class 10: 0.3561
class 11: 0.0148
class 12: 0.2386
Epoch 206/300 TrainLoss: 1.0481 Time: 3.1s LR: 0.000230
Epoch 207/300 TrainLoss: 1.0503 Time: 4.0s LR: 0.000226
Epoch 208/300 TrainLoss: 1.0225 Time: 4.1s LR: 0.000221
Epoch 209/300 TrainLoss: 1.0408 Time: 4.3s LR: 0.000217
Epoch 210/300 TrainLoss: 1.0131 Time: 3.4s LR: 0.000213
-- Validation Loss: 2.0843 mIoU: 0.2986 OA: 0.7019
Per-class IoU:
class 00: 0.8588
class 01: 0.9408
class 02: 0.5806
class 03: 0.0000
class 04: 0.0025
class 05: 0.1015
class 06: 0.0115
class 07: 0.4369
class 08: 0.2996
class 09: 0.0103
class 10: 0.3683
class 11: 0.0248
class 12: 0.2459
Epoch 211/300 TrainLoss: 1.0084 Time: 4.3s LR: 0.000208
Epoch 212/300 TrainLoss: 1.0382 Time: 4.2s LR: 0.000204
Epoch 213/300 TrainLoss: 1.0187 Time: 4.1s LR: 0.000200
Epoch 214/300 TrainLoss: 1.0115 Time: 3.4s LR: 0.000195
Epoch 215/300 TrainLoss: 1.0261 Time: 4.4s LR: 0.000191
-- Validation Loss: 2.0712 mIoU: 0.3013 OA: 0.7060
Per-class IoU:
class 00: 0.8796
class 01: 0.9472
class 02: 0.5912
class 03: 0.0000
class 04: 0.0033
class 05: 0.0872
class 06: 0.0202
class 07: 0.4361
class 08: 0.2779
class 09: 0.0216
class 10: 0.3913
class 11: 0.0255
class 12: 0.2362
Epoch 216/300 TrainLoss: 1.0524 Time: 4.7s LR: 0.000187
Epoch 217/300 TrainLoss: 1.0170 Time: 4.1s LR: 0.000183
Epoch 218/300 TrainLoss: 1.0445 Time: 4.3s LR: 0.000179
Epoch 219/300 TrainLoss: 1.0363 Time: 3.8s LR: 0.000175
Epoch 220/300 TrainLoss: 1.0483 Time: 3.4s LR: 0.000171
-- Validation Loss: 2.0768 mIoU: 0.3005 OA: 0.7093
Per-class IoU:
class 00: 0.8785
class 01: 0.9522
class 02: 0.5945
class 03: 0.0000
class 04: 0.0025
class 05: 0.0956
class 06: 0.0153
class 07: 0.4366
class 08: 0.2634
class 09: 0.0278
class 10: 0.3823
class 11: 0.0204
class 12: 0.2381
Epoch 221/300 TrainLoss: 1.0348 Time: 4.4s LR: 0.000167
Epoch 222/300 TrainLoss: 1.0155 Time: 4.4s LR: 0.000163
Epoch 223/300 TrainLoss: 1.0448 Time: 4.7s LR: 0.000159
Epoch 224/300 TrainLoss: 1.0388 Time: 4.7s LR: 0.000155
Epoch 225/300 TrainLoss: 1.0155 Time: 3.6s LR: 0.000151
-- Validation Loss: 2.0820 mIoU: 0.2991 OA: 0.7100
Per-class IoU:
class 00: 0.8828
class 01: 0.9503
class 02: 0.5940
class 03: 0.0000
class 04: 0.0018
class 05: 0.1058
class 06: 0.0181
class 07: 0.4237
class 08: 0.2808
class 09: 0.0079
class 10: 0.3714
class 11: 0.0153
class 12: 0.2359
Epoch 226/300 TrainLoss: 1.0076 Time: 4.2s LR: 0.000147
Epoch 227/300 TrainLoss: 1.0267 Time: 4.1s LR: 0.000144
Epoch 228/300 TrainLoss: 1.0221 Time: 3.3s LR: 0.000140
Epoch 229/300 TrainLoss: 1.0115 Time: 4.4s LR: 0.000136
Epoch 230/300 TrainLoss: 1.0153 Time: 3.4s LR: 0.000133
-- Validation Loss: 2.1169 mIoU: 0.2902 OA: 0.7023
Per-class IoU:
class 00: 0.8778
class 01: 0.9415
class 02: 0.5843
class 03: 0.0000
class 04: 0.0008
class 05: 0.0832
class 06: 0.0124
class 07: 0.4169
class 08: 0.2572
class 09: 0.0060
class 10: 0.3541
class 11: 0.0161
class 12: 0.2223
Epoch 231/300 TrainLoss: 1.0100 Time: 4.6s LR: 0.000129
Epoch 232/300 TrainLoss: 1.0263 Time: 3.5s LR: 0.000125
Epoch 233/300 TrainLoss: 1.0047 Time: 5.9s LR: 0.000122
Epoch 234/300 TrainLoss: 0.9948 Time: 4.1s LR: 0.000119
Epoch 235/300 TrainLoss: 1.0034 Time: 3.6s LR: 0.000115
-- Validation Loss: 2.1010 mIoU: 0.2934 OA: 0.7028
Per-class IoU:
class 00: 0.8774
class 01: 0.9566
class 02: 0.5919
class 03: 0.0000
class 04: 0.0072
class 05: 0.0812
class 06: 0.0131
class 07: 0.4127
class 08: 0.2440
class 09: 0.0108
class 10: 0.3772
class 11: 0.0155
class 12: 0.2271
Epoch 236/300 TrainLoss: 1.0129 Time: 4.6s LR: 0.000112
Epoch 237/300 TrainLoss: 0.9965 Time: 3.7s LR: 0.000108
Epoch 238/300 TrainLoss: 1.0122 Time: 4.8s LR: 0.000105
Epoch 239/300 TrainLoss: 1.0187 Time: 4.1s LR: 0.000102
Epoch 240/300 TrainLoss: 0.9959 Time: 4.1s LR: 0.000099
-- Validation Loss: 2.0886 mIoU: 0.3001 OA: 0.7110
Per-class IoU:
class 00: 0.8785
class 01: 0.9536
class 02: 0.5996
class 03: 0.0000
class 04: 0.0033
class 05: 0.0916
class 06: 0.0181
class 07: 0.4459
class 08: 0.2658
class 09: 0.0146
class 10: 0.3823
class 11: 0.0180
class 12: 0.2298
Epoch 241/300 TrainLoss: 1.0186 Time: 3.7s LR: 0.000095
Epoch 242/300 TrainLoss: 0.9886 Time: 4.1s LR: 0.000092
Epoch 243/300 TrainLoss: 0.9896 Time: 3.6s LR: 0.000089
Epoch 244/300 TrainLoss: 0.9730 Time: 3.6s LR: 0.000086
Epoch 245/300 TrainLoss: 0.9998 Time: 4.8s LR: 0.000083
-- Validation Loss: 2.1042 mIoU: 0.2955 OA: 0.7090
Per-class IoU:
class 00: 0.8765
class 01: 0.9551
class 02: 0.5949
class 03: 0.0000
class 04: 0.0016
class 05: 0.0761
class 06: 0.0147
class 07: 0.4373
class 08: 0.2583
class 09: 0.0000
class 10: 0.3727
class 11: 0.0156
class 12: 0.2385
Epoch 246/300 TrainLoss: 1.0131 Time: 4.3s LR: 0.000080
Epoch 247/300 TrainLoss: 0.9858 Time: 3.8s LR: 0.000078
Epoch 248/300 TrainLoss: 1.0242 Time: 4.3s LR: 0.000075
Epoch 249/300 TrainLoss: 0.9810 Time: 4.1s LR: 0.000072
Epoch 250/300 TrainLoss: 0.9906 Time: 4.4s LR: 0.000069
-- Validation Loss: 2.0986 mIoU: 0.3001 OA: 0.7087
Per-class IoU:
class 00: 0.8778
class 01: 0.9567
class 02: 0.5917
class 03: 0.0000
class 04: 0.0024
class 05: 0.0893
class 06: 0.0165
class 07: 0.4428
class 08: 0.2761
class 09: 0.0138
class 10: 0.3777
class 11: 0.0226
class 12: 0.2336
Epoch 251/300 TrainLoss: 1.0195 Time: 4.0s LR: 0.000067
Epoch 252/300 TrainLoss: 0.9886 Time: 4.1s LR: 0.000064
Epoch 253/300 TrainLoss: 0.9617 Time: 3.8s LR: 0.000061
Epoch 254/300 TrainLoss: 0.9751 Time: 4.0s LR: 0.000059
Epoch 255/300 TrainLoss: 0.9858 Time: 4.1s LR: 0.000056
-- Validation Loss: 2.0987 mIoU: 0.2974 OA: 0.7060
Per-class IoU:
class 00: 0.8753
class 01: 0.9501
class 02: 0.5905
class 03: 0.0000
class 04: 0.0024
class 05: 0.0858
class 06: 0.0159
class 07: 0.4244
class 08: 0.2862
class 09: 0.0101
class 10: 0.3758
class 11: 0.0170
class 12: 0.2326
Epoch 256/300 TrainLoss: 1.0118 Time: 4.1s LR: 0.000054
Epoch 257/300 TrainLoss: 0.9871 Time: 4.5s LR: 0.000052
Epoch 258/300 TrainLoss: 0.9856 Time: 4.2s LR: 0.000049
Epoch 259/300 TrainLoss: 0.9748 Time: 4.2s LR: 0.000047
Epoch 260/300 TrainLoss: 0.9781 Time: 3.6s LR: 0.000045
-- Validation Loss: 2.0932 mIoU: 0.2992 OA: 0.7116
Per-class IoU:
class 00: 0.8829
class 01: 0.9502
class 02: 0.5979
class 03: 0.0000
class 04: 0.0025
class 05: 0.0815
class 06: 0.0143
class 07: 0.4143
class 08: 0.2868
class 09: 0.0112
class 10: 0.3870
class 11: 0.0188
class 12: 0.2425
Epoch 261/300 TrainLoss: 0.9690 Time: 5.0s LR: 0.000043
Epoch 262/300 TrainLoss: 1.0058 Time: 3.5s LR: 0.000040
Epoch 263/300 TrainLoss: 0.9998 Time: 4.2s LR: 0.000038
Epoch 264/300 TrainLoss: 0.9924 Time: 4.3s LR: 0.000036
Epoch 265/300 TrainLoss: 0.9626 Time: 4.6s LR: 0.000034
-- Validation Loss: 2.1054 mIoU: 0.2948 OA: 0.7077
Per-class IoU:
class 00: 0.8774
class 01: 0.9548
class 02: 0.5917
class 03: 0.0000
class 04: 0.0017
class 05: 0.0797
class 06: 0.0133
class 07: 0.4030
class 08: 0.2669
class 09: 0.0000
class 10: 0.3829
class 11: 0.0239
class 12: 0.2366