np.stack(x_new, axis=-2)

Understanding what np.stack does with multi-dimensional arrays
np.stack(x_new, axis=-2) stacks the arrays in x_new along a new second-to-last dimension, producing an array of shape (6940, 45, 90, a, 9), where a is the number of retained regions. Concretely, it takes a list of a arrays, each of shape (6940, 45, 90, 9), and inserts a new axis of length a just before the final axis of 9 features. The stacked array preserves the structure and information of the original data.


Inside the function, x_new = np.stack(x_new, axis=-2) stacks the per-region x arrays along the second-to-last dimension, yielding a new array x_new of shape (6940, 45, 90, a, 9), where a is the number of retained regions.

More generally, suppose x_new is a list of a arrays, each of shape (n, m, p, b), where n, m, and p are the spatial dimensions of each retained region, a is the number of retained regions, and b is the number of features per region. Calling np.stack(x_new, axis=-2) then returns a new array of shape (n, m, p, a, b), where each slice along the a dimension is the array at the corresponding position in the x_new list.
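A minimal sketch that reproduces this behavior with small illustrative dimensions (n=2, m=3, p=4, a=5, b=9 are made-up values, not the original data):

import numpy as np

# Five region arrays, each of shape (2, 3, 4, 9): (n, m, p, b)
x_new = [np.random.rand(2, 3, 4, 9) for _ in range(5)]

stacked = np.stack(x_new, axis=-2)
print(stacked.shape)  # (2, 3, 4, 5, 9) -> (n, m, p, a, b)

# Each slice along the new axis is the corresponding list element
assert np.array_equal(stacked[..., 2, :], x_new[2])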

import torch
import torch.nn as nn
import torch.nn.functional as F

# List of rare classes (must live outside the class definitions)
rare_classes = [3, 4, 6, 9, 11]

# -----------------------------
# 1. Enhanced frequency-domain attention (EFA)
# -----------------------------
class EnhancedFreqAttention(nn.Module):
    def __init__(self, dim, reduction=4):
        super().__init__()
        self.conv = nn.Sequential(
            nn.Conv1d(dim, dim // reduction, 3, padding=1),
            nn.GELU(),
            nn.Conv1d(dim // reduction, dim, 3, padding=1)
        )
        self.attn = nn.Sequential(
            nn.Linear(dim, dim // reduction),
            nn.GELU(),
            nn.Linear(dim // reduction, dim)
        )
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # Frequency-domain convolutional attention
        conv_attn = self.conv(x.transpose(1, 2)).transpose(1, 2)
        # Global context attention
        global_attn = torch.mean(x, dim=1, keepdim=True)  # keep the sequence dim
        global_attn = self.attn(global_attn)
        # Combine both attention maps
        fused_attn = self.sigmoid(conv_attn + global_attn)
        return x * fused_attn

# -----------------------------
# 2. Multi-scale frequency-domain fusion block
# -----------------------------
class MultiScaleFreqBlock(nn.Module):
    def __init__(self, embed_dim):
        super().__init__()
        self.branch1 = nn.Sequential(
            nn.Linear(12, embed_dim // 2),
            nn.GELU()
        )
        self.branch2 = nn.Sequential(
            nn.Linear(12, embed_dim // 2),  # doubled frequency resolution
            nn.GELU()
        )
        self.fuse = nn.Linear(embed_dim, embed_dim)

    def forward(self, x):
        B, N, _ = x.shape  # N = number of input points
        # Base frequency features
        fft1 = torch.fft.fft(x, dim=1)
        feat1 = torch.cat([fft1.real, fft1.imag], dim=-1)  # shape: [B, N, 12]
        b1 = self.branch1(feat1)
        # High-frequency enhanced features
        fft2 = torch.fft.fft(x, n=2048, dim=1)  # shape: [B, 2048, 6], complex
        feat2 = torch.cat([fft2.real, fft2.imag], dim=-1)  # shape: [B, 2048, 12]
        # Key fix: process first, then truncate
        b2_full = self.branch2(feat2)  # shape: [B, 2048, embed_dim//2]
        b2 = b2_full[:, :N, :]  # keep the first N points
        # Fuse the two branches
        fused = torch.cat([b1, b2], dim=-1)  # shape: [B, N, embed_dim]
        return self.fuse(fused)

# -----------------------------
# 3. Transformer block
# -----------------------------
class TransformerBlock(nn.Module):
    def __init__(self, dim, num_heads, mlp_ratio=4.0, drop=0.1):
        super().__init__()
        self.norm1 = nn.LayerNorm(dim)
        self.attn = nn.MultiheadAttention(dim, num_heads, dropout=drop, batch_first=True)
        self.norm2 = nn.LayerNorm(dim)
        mlp_dim = int(dim * mlp_ratio)
        self.mlp = nn.Sequential(
            nn.Linear(dim, mlp_dim),
            nn.GELU(),
            nn.Dropout(drop),
            nn.Linear(mlp_dim, dim),
            nn.Dropout(drop)
        )

    def forward(self, x):
        # Residual connection 1 (pre-norm attention)
        identity = x
        x = self.norm1(x)
        x, _ = self.attn(x, x, x)
        x = x + identity
        # Residual connection 2 (pre-norm MLP)
        identity = x
        x = self.norm2(x)
        x = self.mlp(x)
        x = x + identity
        return x

# -----------------------------
# 4. Main network: FreqFormerV8
# -----------------------------
class FreqFormerV8(nn.Module):
    def __init__(self, num_classes=13, embed_dim=192, depth=6, num_heads=8):
        super().__init__()
        # Spatial embedding
        self.spatial_embed = nn.Linear(6, embed_dim)
        # Enhanced frequency-domain processing
        self.freq_block = MultiScaleFreqBlock(embed_dim)
        self.efa = EnhancedFreqAttention(embed_dim)
        # Gated fusion
        self.fuse_gate = nn.Sequential(
            nn.Linear(2 * embed_dim, embed_dim),
            nn.ReLU(),
            nn.Linear(embed_dim, embed_dim),
            nn.Sigmoid()
        )
        # Deep Transformer stack
        self.blocks = nn.ModuleList([
            TransformerBlock(embed_dim, num_heads=num_heads) for _ in range(depth)
        ])
        # Dual classification heads
        self.main_head = nn.Sequential(
            nn.LayerNorm(embed_dim),
            nn.Linear(embed_dim, num_classes)
        )
        # Auxiliary head focused on the rare classes
        self.aux_head = nn.Sequential(
            nn.LayerNorm(embed_dim),
            nn.Linear(embed_dim, len(rare_classes))
        )
        # Weight initialisation
        self._init_weights()

    def _init_weights(self):
        for m in self.modules():
            if isinstance(m, nn.Linear):
                nn.init.kaiming_normal_(m.weight)
                if m.bias is not None:
                    nn.init.zeros_(m.bias)

    def forward(self, coords, feats=None):
        # Input handling: fall back to zero features when none are given
        if feats is None:
            feats = torch.zeros_like(coords)
        x = torch.cat([coords, feats], dim=-1)
        # Spatial branch
        spatial_feat = self.spatial_embed(x)
        # Frequency branch
        freq_feat = self.freq_block(x)
        freq_feat = self.efa(freq_feat)
        # Gated fusion
        gate_input = torch.cat([spatial_feat, freq_feat], dim=-1)
        gate = self.fuse_gate(gate_input)
        fused = gate * spatial_feat + (1 - gate) * freq_feat
        # Transformer processing
        for blk in self.blocks:
            fused = blk(fused)
        # Dual outputs
        main_out = self.main_head(fused)
        aux_out = self.aux_head(fused)
        return main_out, aux_out

if __name__ == "__main__":
    # Sanity check
    model = FreqFormerV8(num_classes=13)
    coords = torch.randn(2, 1024, 3)
    feats = torch.randn(2, 1024, 3)
    main_out, aux_out = model(coords, feats)
    print("Main output shape:", main_out.shape)  # [2, 1024, 13]
    print("Aux output shape:", aux_out.shape)    # [2, 1024, 5]

The model code above is already in good shape; here is the current training script:

# train_freqformer_v8.py
"""
Train script for FreqFormerV8
usage example:
    python train_freqformer_v8.py --data_dir <...> --batch_size 4 --num_epochs 150
"""
import os, time, argparse
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
import torch.nn.functional as F
from torch.optim.lr_scheduler import SequentialLR, LinearLR, CosineAnnealingLR

# -------------------------------
# Import the V8 model (adjust path if needed)
from freqformer_v8 import FreqFormerV8

# -------------------------------
# Dice loss (multi-class)
def one_hot(labels, num_classes):
    """labels: [N] (int) -> returns: [N, C]"""
    return torch.eye(num_classes, device=labels.device)[labels]

def multiclass_dice_loss(probs, labels, eps=1e-6):
    """probs: [BP, C], labels: [BP] (ints)"""
    C = probs.shape[1]
    mask = (labels >= 0)
    if mask.sum() == 0:
        return probs.new_tensor(0.)
    probs = probs[mask]   # [M, C]
    labels = labels[mask]
    gt = one_hot(labels, C)  # [M, C]
    intersection = (probs * gt).sum(dim=0)
    cardinality = probs.sum(dim=0) + gt.sum(dim=0)
    dice = (2. * intersection + eps) / (cardinality + eps)
    loss = 1.0 - dice
    return loss.mean()

# -------------------------------
# Focal loss
class FocalLoss(nn.Module):
    def __init__(self, alpha=None, gamma=2.0, reduction='mean'):
        super().__init__()
        self.alpha = alpha
        self.gamma = gamma
        self.reduction = reduction

    def forward(self, inputs, targets):
        ce_loss = F.cross_entropy(inputs, targets, reduction='none', weight=self.alpha)
        pt = torch.exp(-ce_loss)
        focal_loss = (1 - pt) ** self.gamma * ce_loss
        if self.reduction == 'mean':
            return focal_loss.mean()
        elif self.reduction == 'sum':
            return focal_loss.sum()
        return focal_loss

# -------------------------------
# Lovasz softmax
def lovasz_grad(gt_sorted):
    gts = gt_sorted.sum()
    if gts == 0:
        return torch.zeros_like(gt_sorted)
    intersection = gts - gt_sorted.cumsum(0)
    union = gts + (1 - gt_sorted).cumsum(0)
    jaccard = 1. - intersection / union
    if gt_sorted.numel() > 1:
        jaccard[1:] = jaccard[1:] - jaccard[:-1]
    return jaccard

def flatten_probas(probas, labels, ignore_index=-1):
    mask = (labels != ignore_index)
    if not mask.any():
        return probas.new(0), labels.new(0)
    return probas[mask], labels[mask]

def lovasz_softmax(probas, labels, classes='present', ignore_index=-1):
    C = probas.size(1)
    losses = []
    probas, labels = flatten_probas(probas, labels, ignore_index)
    if probas.numel() == 0:
        return probas.new_tensor(0.)
    for c in range(C):
        fg = (labels == c).float()
        if classes == 'present' and fg.sum() == 0:
            continue
        class_pred = probas[:, c]
        errors = (fg - class_pred).abs()
        perm = torch.argsort(errors, descending=True)
        fg_sorted = fg[perm]
        grad = lovasz_grad(fg_sorted)
        losses.append(torch.dot(F.relu(errors[perm]), grad))
    if len(losses) == 0:
        return probas.new_tensor(0.)
    return sum(losses) / len(losses)

# -------------------------------
# Dataset (S3DIS npy layout assumed)
class S3DISDatasetAug(Dataset):
    def __init__(self, data_dir, split='train', val_area='Area_5', num_points=1024, augment=True):
        self.num_points = num_points
        self.augment = augment and (split == 'train')
        self.files = []
        self.rare_classes = [3, 4, 6, 9, 11]  # must be initialised here
        for f in sorted(os.listdir(data_dir)):
            if not f.endswith('.npy'):
                continue
            if split == 'train' and val_area in f:
                continue
            if split == 'val' and val_area not in f:
                continue
            self.files.append(os.path.join(data_dir, f))
        if len(self.files) == 0:
            raise RuntimeError(f"No files found in {data_dir} (split={split})")
        # Build a per-file class cache
        self.file_class_map = {}
        for file_path in self.files:
            # Memory-mapped load, to avoid reading whole files into RAM
            data = np.load(file_path, mmap_mode='r')
            # Subsample the labels (every 100th point) instead of scanning the full file
            labels_sample = data[::100, 6]
            self.file_class_map[file_path] = np.unique(labels_sample).tolist()

    def __len__(self):
        return len(self.files)

    def __getitem__(self, idx):
        if self.augment and np.random.rand() < 0.3:
            # Use the cache built at init time instead of loading files here
            rare_files = [f for f in self.files
                          if any(c in self.file_class_map[f] for c in self.rare_classes)]
            if rare_files:  # guard against an empty list
                file_path = np.random.choice(rare_files)
                return self._load_file(file_path)
        # Default: load the file at the current index
        return self._load_file(self.files[idx])

    def _load_file(self, file_path):
        data = np.load(file_path)
        coords = data[:, :3].astype(np.float32)
        extra = data[:, 3:6].astype(np.float32)
        labels = data[:, 6].astype(np.int64)
        N = coords.shape[0]
        if N >= self.num_points:
            choice = np.random.choice(N, self.num_points, replace=False)
        else:
            choice = np.random.choice(N, self.num_points, replace=True)
        coords = coords[choice]
        extra = extra[choice]
        labels = labels[choice]
        if self.augment:
            # Random rotation about z, isotropic scaling, and jitter
            theta = np.random.uniform(0, 2 * np.pi)
            c, s = np.cos(theta), np.sin(theta)
            R = np.array([[c, -s, 0], [s, c, 0], [0, 0, 1]], dtype=np.float32)
            coords = coords.dot(R.T)
            coords = coords * np.random.uniform(0.9, 1.1)
            coords = coords + np.random.normal(0, 0.01, coords.shape).astype(np.float32)
        local_feat = np.concatenate([coords, extra], axis=1)
        return {
            'coords': torch.from_numpy(coords).float(),
            'extra': torch.from_numpy(extra).float(),
            'local_feat': torch.from_numpy(local_feat).float(),
            'label': torch.from_numpy(labels).long()
        }

# -------------------------------
# Confusion matrix & IoU
def compute_confusion_matrix(preds, gts, num_classes):
    mask = (gts >= 0) & (gts < num_classes)
    gt = gts[mask].astype(np.int64)
    pred = preds[mask].astype(np.int64)
    conf = np.bincount(gt * num_classes + pred, minlength=num_classes ** 2)
    return conf.reshape((num_classes, num_classes))

def compute_iou_from_conf(conf):
    inter = np.diag(conf)
    gt_sum = conf.sum(axis=1)
    pred_sum = conf.sum(axis=0)
    union = gt_sum + pred_sum - inter
    return inter / (union + 1e-10)

# -------------------------------
# Class weights
def compute_class_weights(file_list, num_classes, method='inv_sqrt'):
    counts = np.zeros(num_classes, dtype=np.float64)
    for p in file_list:
        data = np.load(p, mmap_mode='r')
        labels = data[:, 6].astype(np.int64)
        for c in range(num_classes):
            counts[c] += (labels == c).sum()
    counts = np.maximum(counts, 1.0)
    if method == 'inv_freq':
        weights = 1.0 / counts
    elif method == 'inv_sqrt':
        weights = 1.0 / np.sqrt(counts)
    else:
        weights = np.ones_like(counts)
    weights = weights / weights.sum() * num_classes
    return torch.from_numpy(weights.astype(np.float32))

# -------------------------------
# Main training loop
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_dir', default='/root/autodl-tmp/pointcloud_seg/data/S3DIS_new/processed_npy')
    parser.add_argument('--save_dir', default='./checkpoints_v6')
    parser.add_argument('--batch_size', type=int, default=4)
    parser.add_argument('--num_epochs', type=int, default=300)
    parser.add_argument('--num_points', type=int, default=1024)
    parser.add_argument('--num_classes', type=int, default=13)
    parser.add_argument('--lr', type=float, default=1e-3)
    parser.add_argument('--device', default='cuda' if torch.cuda.is_available() else 'cpu')
    parser.add_argument('--use_class_weights', action='store_true')
    parser.add_argument('--use_lovasz', action='store_true')
    parser.add_argument('--warmup_epochs', type=int, default=5)
    parser.add_argument('--num_workers', type=int, default=8)
    parser.add_argument('--grad_clip', type=float, default=1.0)
    parser.add_argument('--use_focal', action='store_true', help='Use Focal Loss instead of CrossEntropy')
    parser.add_argument('--focal_gamma', type=float, default=2.0, help='Gamma parameter for Focal Loss')
    args = parser.parse_args()

    os.makedirs(args.save_dir, exist_ok=True)
    device = torch.device(args.device)

    # Dataset
    train_ds = S3DISDatasetAug(args.data_dir, split='train', num_points=args.num_points, augment=True)
    val_ds = S3DISDatasetAug(args.data_dir, split='val', num_points=args.num_points, augment=False)
    train_loader = DataLoader(train_ds, batch_size=args.batch_size, shuffle=True,
                              num_workers=args.num_workers, drop_last=True)
    val_loader = DataLoader(val_ds, batch_size=1, shuffle=False,
                            num_workers=max(1, args.num_workers // 2))

    # Class weights
    class_weights = None
    if args.use_class_weights:
        print("Computing class weights...")
        class_weights = compute_class_weights(train_ds.files, args.num_classes).to(device)
        print("class weights:", class_weights.cpu().numpy())

    # Model
    model = FreqFormerV8(num_classes=args.num_classes)
    if torch.cuda.device_count() > 1 and args.device.startswith('cuda'):
        print("Using DataParallel on devices:", list(range(torch.cuda.device_count())))
        model = torch.nn.DataParallel(model)
    model = model.to(device)

    # Focal loss
    focal_criterion = None
    if args.use_focal:
        print(f"Using Focal Loss with gamma={args.focal_gamma}")
        alpha = class_weights if class_weights is not None else torch.ones(args.num_classes).to(device)
        rare_classes = [3, 4, 6, 9, 11]  # adjust as needed
        for c in rare_classes:
            if c < len(alpha):
                alpha[c] *= 25.0
        focal_criterion = FocalLoss(alpha=alpha, gamma=args.focal_gamma).to(device)

    # Optimizer with a warmup + cosine-annealing schedule
    optimizer = torch.optim.AdamW(model.parameters(), lr=args.lr, weight_decay=1e-4)
    warmup = LinearLR(optimizer, start_factor=0.01, end_factor=1.0, total_iters=args.warmup_epochs)
    cosine = CosineAnnealingLR(optimizer, T_max=max(1, args.num_epochs - args.warmup_epochs))
    scheduler = SequentialLR(optimizer, schedulers=[warmup, cosine], milestones=[args.warmup_epochs])

    best_miou = 0.0

    # Training loop
    for epoch in range(args.num_epochs):
        model.train()
        t0 = time.time()
        running_loss = 0.0
        for batch in train_loader:
            coords = batch['coords'].to(device)
            extra = batch['extra'].to(device)
            labels = batch['label'].to(device)
            optimizer.zero_grad()
            # The model returns (main_out, aux_out); the aux head is currently
            # unsupervised here (see the sketch after this script)
            logits, aux_logits = model(coords, extra)
            B, N, C = logits.shape
            logits_flat = logits.view(-1, C)
            labels_flat = labels.view(-1)
            probs = F.softmax(logits_flat, dim=-1)
            dice = multiclass_dice_loss(probs, labels_flat)
            if args.use_focal and focal_criterion is not None:
                ce = focal_criterion(logits_flat, labels_flat)
            else:
                ce = F.cross_entropy(logits_flat, labels_flat, weight=class_weights, ignore_index=-1)
            lov = lovasz_softmax(probs, labels_flat) if args.use_lovasz else logits_flat.new_tensor(0.0)
            loss = 0.5 * ce + 2.0 * dice + 0.3 * lov
            loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip)
            optimizer.step()
            running_loss += loss.item()
        scheduler.step()
        avg_loss = running_loss / max(1, len(train_loader))
        t1 = time.time()
        print(f"Epoch {epoch+1}/{args.num_epochs} TrainLoss: {avg_loss:.4f} "
              f"Time: {(t1-t0):.1f}s LR: {optimizer.param_groups[0]['lr']:.6f}")

        # Validation
        if (epoch + 1) % 5 == 0 or (epoch + 1) == args.num_epochs:
            model.eval()
            conf = np.zeros((args.num_classes, args.num_classes), dtype=np.int64)
            tot_loss = 0.0
            cnt = 0
            with torch.no_grad():
                for batch in val_loader:
                    coords = batch['coords'].to(device)
                    extra = batch['extra'].to(device)
                    labels = batch['label'].to(device)
                    logits, _ = model(coords, extra)
                    B, N, C = logits.shape
                    logits_flat = logits.view(-1, C)
                    labels_flat = labels.view(-1)
                    loss_ce = F.cross_entropy(logits_flat, labels_flat,
                                              weight=class_weights, ignore_index=-1)
                    probs = F.softmax(logits_flat, dim=-1)
                    dice_val = multiclass_dice_loss(probs, labels_flat)
                    lov_val = lovasz_softmax(probs, labels_flat) if args.use_lovasz else logits_flat.new_tensor(0.0)
                    loss_val = 0.5 * loss_ce + 2.0 * dice_val + 0.3 * lov_val
                    tot_loss += loss_val.item()
                    preds = logits.argmax(dim=-1).cpu().numpy().reshape(-1)
                    gts = labels.cpu().numpy().reshape(-1)
                    conf += compute_confusion_matrix(preds, gts, args.num_classes)
                    cnt += 1
            mean_loss = tot_loss / max(1, cnt)
            iou = compute_iou_from_conf(conf)
            miou = np.nanmean(iou)
            oa = np.diag(conf).sum() / (conf.sum() + 1e-12)
            print(f"-- Validation Loss: {mean_loss:.4f} mIoU: {miou:.4f} OA: {oa:.4f}")
            print("Per-class IoU:")
            for cid, v in enumerate(iou):
                print(f"  class {cid:02d}: {v:.4f}")
            if miou > best_miou:
                best_miou = miou
                path = os.path.join(args.save_dir, f'best_epoch_{epoch+1:03d}miou.pth')
                state = {
                    'epoch': epoch + 1,
                    'best_miou': best_miou,
                    'model_state_dict': model.module.state_dict()
                        if isinstance(model, torch.nn.DataParallel) else model.state_dict()
                }
                torch.save(state, path)
                print("Saved best:", path)

    # Final save
    final_path = os.path.join(args.save_dir, f'final_epoch_{args.num_epochs:03d}miou.pth')
    state = {
        'epoch': args.num_epochs,
        'best_miou': best_miou,
        'model_state_dict': model.module.state_dict()
            if isinstance(model, torch.nn.DataParallel) else model.state_dict()
    }
    torch.save(state, final_path)
    print("Training finished. Final saved to:", final_path)

if __name__ == "__main__":
    main()

What further optimizations does this training script need? It has to genuinely push the mIoU up.
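One concrete gap worth noting: FreqFormerV8 computes an auxiliary rare-class output, but the training loop above never supervises it. A minimal, hedged sketch of how that head could be wired into the loss; the 0.3 weight and the per-class binary-target construction are illustrative assumptions, not part of the original script:

import torch
import torch.nn.functional as F

rare_classes = [3, 4, 6, 9, 11]  # same list as in the model file

def aux_rare_loss(aux_logits, labels, rare_classes):
    # aux_logits: [B, N, len(rare_classes)], labels: [B, N]
    aux_flat = aux_logits.reshape(-1, len(rare_classes))
    labels_flat = labels.reshape(-1)
    # One binary target column per rare class: 1 where the point belongs to it
    targets = torch.stack([(labels_flat == c).float() for c in rare_classes], dim=1)
    return F.binary_cross_entropy_with_logits(aux_flat, targets)

# Inside the training loop, after computing the main loss terms (0.3 is a guess):
#   loss = 0.5 * ce + 2.0 * dice + 0.3 * lov + 0.3 * aux_rare_loss(aux_logits, labels, rare_classes)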
#!/usr/bin/env python3
import os
import math
import json
import actionlib
import control_msgs.msg
import numpy as np
import rospy
from controller import ArmController
from gazebo_msgs.msg import ModelStates
from pyquaternion import Quaternion as PyQuaternion
from gazebo_ros_link_attacher.srv import SetStatic, SetStaticRequest, SetStaticResponse
from gazebo_ros_link_attacher.srv import Attach, AttachRequest, AttachResponse

PKG_PATH = os.path.dirname(os.path.abspath(__file__))

MODELS_INFO = {
    "X1-Y2-Z1": {"home": [0.472098, -0.415840, 0.789471]},
    "X2-Y2-Z2": {"home": [0.480013, -0.853908, 0.789471]},
    "X1-Y3-Z2": {"home": [0.475937, -0.634054, 0.789471]},
    "X1-Y2-Z2": {"home": [0.678948, -0.424059, 0.789471]},
    "X1-Y2-Z2-CHAMFER": {"home": [0.877965, -0.399189, 0.789471]},
    "X1-Y4-Z2": {"home": [0.284295, -0.864617, 0.789471]},
    "X1-Y1-Z2": {"home": [0.275572, -0.413132, 0.789471]},
    "X1-Y2-Z2-TWINFILLET": {"home": [0.272887, -0.627295, 0.789471]},
    "X1-Y3-Z2-FILLET": {"home": [0.682787, -0.635804, 0.789471]},
    "X1-Y4-Z1": {"home": [0.881767, -0.622449, 0.789471]},
    "X2-Y2-Z2-FILLET": {"home": [0.688769, -0.867164, 0.789471]},
}

# Optional global shift of the home positions (currently disabled)
# for model, model_info in MODELS_INFO.items():
#     MODELS_INFO[model]["home"] = model_info["home"] + np.array([0.0, 0.10, 0.0])

# Read each lego model's bounding-box size from its model.json
for model, info in MODELS_INFO.items():
    model_json_path = os.path.join(PKG_PATH, "..", "models", f"lego_{model}", "model.json")
    model_json_path = os.path.abspath(model_json_path)
    if not os.path.exists(model_json_path):
        raise FileNotFoundError(f"Model file {model_json_path} not found")
    model_json = json.load(open(model_json_path, "r"))
    corners = np.array(model_json["corners"])
    size_x = np.max(corners[:, 0]) - np.min(corners[:, 0])
    size_y = np.max(corners[:, 1]) - np.min(corners[:, 1])
    size_z = np.max(corners[:, 2]) - np.min(corners[:, 2])
    print(f"{model}: {size_x:.3f} x {size_y:.3f} x {size_z:.3f}")
    MODELS_INFO[model]["size"] = (size_x, size_y, size_z)

BUILDING = json.load(open(os.path.join(PKG_PATH, "building.json"), "r"))

# Compensate for the interlocking height
INTERLOCKING_OFFSET = 0.019

SAFE_X = -0.40
SAFE_Y = -0.13
SURFACE_Z = 0.774

# Resting orientation of the end effector
DEFAULT_QUAT = PyQuaternion(axis=(0, 1, 0), angle=math.pi)
# Resting position of the end effector
DEFAULT_POS = (-0.1, -0.2, 1.2)

DEFAULT_PATH_TOLERANCE = control_msgs.msg.JointTolerance()
DEFAULT_PATH_TOLERANCE.name = "path_tolerance"
DEFAULT_PATH_TOLERANCE.velocity = 10


def get_gazebo_model_name(model_name, vision_model_pose):
    """
    Get the name of the model inside gazebo.
    It is needed for the link attacher plugin.
    """
    models = rospy.wait_for_message("/gazebo/model_states", ModelStates, timeout=None)
    epsilon = 0.05
    for gazebo_model_name, model_pose in zip(models.name, models.pose):
        if model_name not in gazebo_model_name:
            continue
        # Get everything inside a square of side epsilon centered in vision_model_pose
        ds = abs(model_pose.position.x - vision_model_pose.position.x) \
            + abs(model_pose.position.y - vision_model_pose.position.y)
        if ds <= epsilon:
            return gazebo_model_name
    raise ValueError(f"Model {model_name} at position {vision_model_pose.position.x} "
                     f"{vision_model_pose.position.y} was not found!")


def get_model_name(gazebo_model_name):
    return gazebo_model_name.replace("lego_", "").split("_", maxsplit=1)[0]


def get_legos_pos(vision=False, strip=True):
    # Get lego positions, either from the vision topic or straight from gazebo
    if vision:
        legos = rospy.wait_for_message("/lego_detections", ModelStates, timeout=None)
    else:
        models = rospy.wait_for_message("/gazebo/model_states", ModelStates, timeout=None)
        legos = ModelStates()
        for name, pose in zip(models.name, models.pose):
            if "X" not in name:
                continue
            if strip:
                name = get_model_name(name)
            legos.name.append(name)
            legos.pose.append(pose)
    return [(lego_name, lego_pose) for lego_name, lego_pose in zip(legos.name, legos.pose)]


def straighten(model_pose, gazebo_model_name):
    x = model_pose.position.x
    y = model_pose.position.y
    z = model_pose.position.z
    model_quat = PyQuaternion(
        x=model_pose.orientation.x,
        y=model_pose.orientation.y,
        z=model_pose.orientation.z,
        w=model_pose.orientation.w)
    model_size = MODELS_INFO[get_model_name(gazebo_model_name)]["size"]

    """
        Calculate approach quaternion and target quaternion
    """
    facing_direction = get_axis_facing_camera(model_quat)
    approach_angle = get_approach_angle(model_quat, facing_direction)

    print(f"Lego is facing {facing_direction}")
    print(f"Approaching at {approach_angle:.2f} deg")

    # Calculate approach quat
    approach_quat = get_approach_quat(facing_direction, approach_angle)

    # Get above the object
    controller.move_to(x, y, target_quat=approach_quat)

    # Calculate target quat
    regrip_quat = DEFAULT_QUAT
    if facing_direction == (1, 0, 0) or facing_direction == (0, 1, 0):  # Side
        target_quat = DEFAULT_QUAT
        pitch_angle = -math.pi / 2 + 0.2
        if abs(approach_angle) < math.pi / 2:
            target_quat = target_quat * PyQuaternion(axis=(0, 0, 1), angle=math.pi / 2)
        else:
            target_quat = target_quat * PyQuaternion(axis=(0, 0, 1), angle=-math.pi / 2)
        target_quat = PyQuaternion(axis=(0, 1, 0), angle=pitch_angle) * target_quat
        if facing_direction == (0, 1, 0):
            regrip_quat = PyQuaternion(axis=(0, 0, 1), angle=math.pi / 2) * regrip_quat
    elif facing_direction == (0, 0, -1):
        """
            Pre-positioning
        """
        controller.move_to(z=z, target_quat=approach_quat)
        close_gripper(gazebo_model_name, model_size[0])

        tmp_quat = PyQuaternion(axis=(0, 0, 1), angle=2 * math.pi / 6) * DEFAULT_QUAT
        controller.move_to(SAFE_X, SAFE_Y, z + 0.05, target_quat=tmp_quat, z_raise=0.1)  # Move to safe position
        controller.move_to(z=z)
        open_gripper(gazebo_model_name)

        approach_quat = tmp_quat * PyQuaternion(axis=(1, 0, 0), angle=math.pi / 2)
        target_quat = approach_quat * PyQuaternion(axis=(0, 0, 1), angle=-math.pi)  # Add a yaw rotation of 180 deg
        regrip_quat = tmp_quat * PyQuaternion(axis=(0, 0, 1), angle=math.pi)
    else:
        target_quat = DEFAULT_QUAT
        target_quat = target_quat * PyQuaternion(axis=(0, 0, 1), angle=-math.pi / 2)

    """
        Grip the model
    """
    if facing_direction == (0, 0, 1) or facing_direction == (0, 0, -1):
        closure = model_size[0]
        z = SURFACE_Z + model_size[2] / 2
    elif facing_direction == (1, 0, 0):
        closure = model_size[1]
        z = SURFACE_Z + model_size[0] / 2
    elif facing_direction == (0, 1, 0):
        closure = model_size[0]
        z = SURFACE_Z + model_size[1] / 2
    controller.move_to(z=z, target_quat=approach_quat)
    close_gripper(gazebo_model_name, closure)

    """
        Straighten model if needed
    """
    if facing_direction != (0, 0, 1):
        z = SURFACE_Z + model_size[2] / 2
        controller.move_to(z=z + 0.05, target_quat=target_quat, z_raise=0.1)
        controller.move(dz=-0.05)
        open_gripper(gazebo_model_name)

        # Re-grip the model
        controller.move_to(z=z, target_quat=regrip_quat, z_raise=0.1)
        close_gripper(gazebo_model_name, model_size[0])


def close_gripper(gazebo_model_name, closure=0):
    set_gripper(0.81 - closure * 10)
    rospy.sleep(0.5)
    # Create dynamic joint
    if gazebo_model_name is not None:
        req = AttachRequest()
        req.model_name_1 = gazebo_model_name
        req.link_name_1 = "link"
        req.model_name_2 = "robot"
        req.link_name_2 = "wrist_3_link"
        attach_srv.call(req)


def open_gripper(gazebo_model_name=None):
    set_gripper(0.0)
    # Destroy dynamic joint
    if gazebo_model_name is not None:
        req = AttachRequest()
        req.model_name_1 = gazebo_model_name
        req.link_name_1 = "link"
        req.model_name_2 = "robot"
        req.link_name_2 = "wrist_3_link"
        detach_srv.call(req)


def set_model_fixed(model_name):
    req = AttachRequest()
    req.model_name_1 = model_name
    req.link_name_1 = "link"
    req.model_name_2 = "ground_plane"
    req.link_name_2 = "link"
    attach_srv.call(req)

    req = SetStaticRequest()
    print("SETTING {} TO STATIC".format(model_name))
    req.model_name = model_name
    req.link_name = "link"
    req.set_static = True
    setstatic_srv.call(req)


def get_approach_quat(facing_direction, approach_angle):
    quat = DEFAULT_QUAT
    if facing_direction == (0, 0, 1):
        pitch_angle = 0
        yaw_angle = 0
    elif facing_direction == (1, 0, 0) or facing_direction == (0, 1, 0):
        pitch_angle = +0.2
        if abs(approach_angle) < math.pi / 2:
            yaw_angle = math.pi / 2
        else:
            yaw_angle = -math.pi / 2
    elif facing_direction == (0, 0, -1):
        pitch_angle = 0
        yaw_angle = 0
    else:
        raise ValueError(f"Invalid model state {facing_direction}")

    quat = quat * PyQuaternion(axis=(0, 1, 0), angle=pitch_angle)
    quat = quat * PyQuaternion(axis=(0, 0, 1), angle=yaw_angle)
    quat = PyQuaternion(axis=(0, 0, 1), angle=approach_angle + math.pi / 2) * quat
    return quat


def get_axis_facing_camera(quat):
    axis_x = np.array([1, 0, 0])
    axis_y = np.array([0, 1, 0])
    axis_z = np.array([0, 0, 1])
    new_axis_x = quat.rotate(axis_x)
    new_axis_y = quat.rotate(axis_y)
    new_axis_z = quat.rotate(axis_z)
    # Angle between the rotated z axis and world z
    angle = np.arccos(np.clip(np.dot(new_axis_z, axis_z), -1.0, 1.0)) % np.pi
    # Classify whether the model faces up, sideways, or down
    if angle < np.pi / 3:
        return 0, 0, 1
    elif angle < np.pi / 3 * 2 * 1.2:
        if abs(new_axis_x[2]) > abs(new_axis_y[2]):
            return 1, 0, 0
        else:
            return 0, 1, 0
    else:
        return 0, 0, -1


def get_approach_angle(model_quat, facing_direction):
    # Gripper approach angle for the detected facing direction
    if facing_direction == (0, 0, 1):
        return model_quat.yaw_pitch_roll[0] - math.pi / 2  # rotate gripper
    elif facing_direction == (1, 0, 0) or facing_direction == (0, 1, 0):
        axis_x = np.array([0, 1, 0])
        axis_y = np.array([-1, 0, 0])
        new_axis_z = model_quat.rotate(np.array([0, 0, 1]))  # z axis of the lego
        # sin/cos of the angle between the lego z axis and the fixed-frame x axis
        dot = np.clip(np.dot(new_axis_z, axis_x), -1.0, 1.0)
        det = np.clip(np.dot(new_axis_z, axis_y), -1.0, 1.0)
        return math.atan2(det, dot)
    elif facing_direction == (0, 0, -1):
        return -(model_quat.yaw_pitch_roll[0] - math.pi / 2) % math.pi - math.pi
    else:
        raise ValueError(f"Invalid model state {facing_direction}")


def set_gripper(value):
    goal = control_msgs.msg.GripperCommandGoal()
    goal.command.position = value   # from 0.0 to 0.8
    goal.command.max_effort = -1    # do not limit the effort
    action_gripper.send_goal_and_wait(goal, rospy.Duration(10))
    return action_gripper.get_result()


def build(building, center=(0, 0, 0)):
    center = np.array(center) + (0, 0, 0.057 / 2)
    legos = get_legos_pos(vision=False, strip=False)
    legos.sort(reverse=False, key=lambda a: (a[1].position.x, a[1].position.y))

    def find_model(model_name):
        for i, (name, pose) in enumerate(legos):
            if get_model_name(name) == model_name:
                legos.pop(i)
                return name, pose

    controller.move_to(*DEFAULT_POS, DEFAULT_QUAT)
    for layer_name, layer in building.items():
        for block in layer:
            model_name = block["name"]
            target_pos = (block["x"], block["y"], block["z"]) + center
            target_quat = PyQuaternion(1, 0, 0, 0) * DEFAULT_QUAT
            gazebo_model_name, model_pose = find_model(model_name)
            straighten(model_pose, gazebo_model_name)
            controller.move_to(z=center[2] + 0.1, blocking=False)
            controller.move_to(x=target_pos[0], y=target_pos[1], target_quat=target_quat)
            controller.move_to(z=target_pos[2])
            set_model_fixed(gazebo_model_name)
            open_gripper(gazebo_model_name)
            controller.move_to(z=center[2] + 0.1, blocking=False)
        center[2] += 0.057 - INTERLOCKING_OFFSET


if __name__ == "__main__":
    print("Initializing kinematics node")
    rospy.init_node("send_joints")
    controller = ArmController()

    # Create an action client for the gripper
    action_gripper = actionlib.SimpleActionClient(
        "/gripper_controller/gripper_cmd",
        control_msgs.msg.GripperCommandAction
    )
    print("Waiting for gripper controller action")
    action_gripper.wait_for_server()

    setstatic_srv = rospy.ServiceProxy("/link_attacher_node/setstatic", SetStatic)
    attach_srv = rospy.ServiceProxy("/link_attacher_node/attach", Attach)
    detach_srv = rospy.ServiceProxy("/link_attacher_node/detach", Attach)
    setstatic_srv.wait_for_service()
    attach_srv.wait_for_service()
    detach_srv.wait_for_service()

    controller.move_to(*DEFAULT_POS, DEFAULT_QUAT)

    print("Waiting for models detection")
    rospy.sleep(0.5)
    legos = get_legos_pos(vision=False)
    legos.sort(reverse=False, key=lambda a: (a[1].position.x, a[1].position.y))

    build(BUILDING, center=(0, 0, SURFACE_Z))

    for model_name, model_pose in legos:
        open_gripper()
        try:
            model_home = MODELS_INFO[model_name]["home"]
            model_size = MODELS_INFO[model_name]["size"]
        except KeyError:  # unknown model name
            print(f"Model name {model_name} was not recognized!")
            continue

        # Get the actual gazebo model name at the detected xyz coordinates
        try:
            gazebo_model_name = get_gazebo_model_name(model_name, model_pose)
        except ValueError as e:
            print(e)
            continue

        # Straighten lego
        straighten(model_pose, gazebo_model_name)
        controller.move(dz=0.15)

        """
            Go to destination
        """
        x, y, z = model_home
        z += model_size[2] / 2 + 0.004
        print(f"Moving model {model_name} to {x} {y} {z}")

        controller.move_to(x, y, target_quat=DEFAULT_QUAT * PyQuaternion(axis=[0, 0, 1], angle=math.pi / 4))
        # Lower the object and release
        controller.move_to(x, y, z)
        set_model_fixed(gazebo_model_name)
        open_gripper(gazebo_model_name)
        controller.move(dz=0.15)

        if controller.gripper_pose[0][1] > -0.3 and controller.gripper_pose[0][0] > 0:
            controller.move_to(*DEFAULT_POS, DEFAULT_QUAT)

        # Increment z so the next identical lego stacks correctly
        MODELS_INFO[model_name]["home"][2] += model_size[2] - INTERLOCKING_OFFSET

    print("Moving to DEFAULT_POS")
    controller.move_to(*DEFAULT_POS, DEFAULT_QUAT)
    open_gripper()
    rospy.sleep(0.4)
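The up/side/down classification in get_axis_facing_camera can be sanity-checked in isolation. A minimal sketch, assuming the function above is in scope; the 90-degree roll is an illustrative pose, not one from the original scene:

import math
from pyquaternion import Quaternion as PyQuaternion

# A brick rolled 90 deg about x: its z axis now lies in the horizontal plane,
# so the angle to world z is pi/2, which falls in the "side" band
# (pi/3 <= angle < pi/3 * 2 * 1.2).
q = PyQuaternion(axis=(1, 0, 0), angle=math.pi / 2)
print(get_axis_facing_camera(q))  # -> (0, 1, 0): the brick's y axis now points up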
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.optim as optim
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error, r2_score
from torch.utils.data import DataLoader, TensorDataset
import matplotlib.pyplot as plt
from tqdm import tqdm

# Fix random seeds for reproducibility
torch.manual_seed(42)
np.random.seed(42)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# ------------------------------- 1. Data loading and preprocessing -------------------------------
print("Loading data...")
data = pd.read_csv("time_aware_clusters-time step.csv")
print("Raw data shape:", data.shape)

seq_len = 90
input_columns = [1, 2, 4]  # control variables
target_column = 5

# Build sequence data (including lagged target features)
def create_sequences(df, input_cols, target_col, window_size, lag_steps=3):
    full_X = df.iloc[:, input_cols].values
    full_y = df.iloc[:, target_col].values.reshape(-1, 1)
    full_X[:, 1] = np.log10(full_X[:, 1] + 1e-8)
    # Lagged target features
    y_lagged_list = []
    for lag in range(1, lag_steps + 1):
        y_lag = np.concatenate([np.full((lag, 1), np.nan), full_y[:-lag]])
        y_lagged_list.append(y_lag)
    y_lagged = np.hstack(y_lagged_list)
    full_X_extended = np.hstack([y_lagged, full_X])
    valid_idx = np.arange(lag_steps + window_size - 1, len(full_X_extended))
    X, y = [], []
    for idx in valid_idx:
        X.append(full_X_extended[idx - window_size + 1:idx + 1])
        y.append(full_y[idx])
    X, y = np.array(X), np.array(y)
    mask = ~np.isnan(X).any(axis=(1, 2))
    return X[mask], y[mask]

train_data, val_data, test_data = np.split(data, [int(0.8 * len(data)), int(0.9 * len(data))])
X_train, y_train = create_sequences(train_data, input_columns, target_column, seq_len)
X_val, y_val = create_sequences(val_data, input_columns, target_column, seq_len)
X_test, y_test = create_sequences(test_data, input_columns, target_column, seq_len)

# Normalisation (lagged features are scaled too)
scaler_X = MinMaxScaler().fit(X_train.reshape(-1, X_train.shape[2]))
scaler_y = MinMaxScaler().fit(y_train)
scaler_u = MinMaxScaler().fit(data.iloc[:, input_columns].values)

def normalize(X, scaler):
    orig_shape = X.shape
    X_flat = X.reshape(-1, orig_shape[2])
    return scaler.transform(X_flat).reshape(orig_shape)

X_train = torch.tensor(normalize(X_train, scaler_X), dtype=torch.float32)
y_train = torch.tensor(scaler_y.transform(y_train), dtype=torch.float32)
X_val = torch.tensor(normalize(X_val, scaler_X), dtype=torch.float32)
y_val = torch.tensor(scaler_y.transform(y_val), dtype=torch.float32)
X_test = torch.tensor(normalize(X_test, scaler_X), dtype=torch.float32)

# ------------------------------- 2. Improved model definition -------------------------------
class PhysicsLSTM(nn.Module):
    def __init__(self, input_dim, hidden_dim=121):
        super().__init__()
        self.lstm = nn.LSTM(input_dim, hidden_dim, num_layers=2, batch_first=True, dropout=0.2)
        self.fc = nn.Sequential(
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, 1))  # three-layer head
        self.physics_net = nn.Sequential(
            nn.Linear(3, 16),
            nn.ReLU(),
            nn.Linear(16, 1))

    def forward(self, x):
        lstm_out, _ = self.lstm(x)
        pred = self.fc(lstm_out[:, -1, :])
        physics = self.physics_net(x[:, -1, -3:])
        return pred + 0.1 * physics

model = PhysicsLSTM(X_train.shape[2]).to(device)
model.load_state_dict(torch.load('final_model.pth'))
model.eval()

# ------------------------------- 3. Improved MPC controller -------------------------------
class ImprovedMPCController:
    def __init__(self, model, scaler_X, scaler_y, scaler_u, control_cols,
                 horizon=15, Q=25, Q_delta=15, Q_integral=5, R=0.8, du_max=0.02):
        self.model = model
        self.scaler_X = scaler_X
        self.scaler_y = scaler_y
        self.scaler_u = scaler_u
        self.control_cols = control_cols
        self.horizon = horizon
        self.Q = Q                 # tracking weight
        self.Q_delta = Q_delta     # tracking-rate weight
        self.Q_integral = Q_integral
        self.R = R                 # control-smoothness weight
        self.du_max = du_max
        self.u_bounds = np.array([[0.1, 0.1, 0.1], [0.9, 0.9, 0.9]])
        self.u_prev = np.array([0.5, 0.5, 0.5])
        self.integral_error = 0
        self.optimizer = optim.Adam
        self.lr = 0.1
        self.iterations = 200

    def batch_predict(self, init_seq, u_seq):
        seq_batch = np.repeat(init_seq[np.newaxis, :, :], self.horizon, axis=0)
        preds = []
        for t in range(self.horizon):
            u = np.clip(u_seq[t], self.u_bounds[0], self.u_bounds[1])
            seq_batch[:, -1, self.control_cols] = u
            with torch.no_grad():
                pred = self.model(torch.tensor(seq_batch, dtype=torch.float32).to(device)).cpu().numpy()
            preds.append(pred[:, 0])
            if t != self.horizon - 1:
                seq_batch[:, :-1, :] = seq_batch[:, 1:, :]
                seq_batch[:, -1, :3] = np.roll(seq_batch[:, -2, :3], -1)
                seq_batch[:, -1, 2] = pred[:, 0]
        # diagonal() takes prediction t from rollout copy t
        return np.stack(preds, axis=1).diagonal()

    def optimize(self, current_seq, target_seq):
        u_init = np.tile(self.u_prev, (self.horizon, 1))
        u_vars = torch.tensor(u_init, dtype=torch.float32, requires_grad=True, device=device)
        optimizer = self.optimizer([u_vars], lr=self.lr)
        best_loss = float('inf')
        best_u = u_init[0].copy()
        target_tensor = torch.tensor(target_seq[:self.horizon], dtype=torch.float32, device=device)

        for it in range(self.iterations):
            optimizer.zero_grad()
            # Prediction. NOTE: batch_predict detaches u_vars and goes through
            # numpy, so the tracking terms below carry no gradient back to
            # u_vars; only control_loss is actually differentiable here. A
            # fully differentiable rollout is sketched after this script.
            preds_norm = self.batch_predict(current_seq, u_vars.detach().cpu().numpy())
            preds_tensor = torch.tensor(preds_norm, dtype=torch.float32, device=device)
            # De-normalise
            y_pred = torch.tensor(self.scaler_y.inverse_transform(preds_tensor.cpu().numpy().reshape(-1, 1)),
                                  dtype=torch.float32, device=device)
            y_target = torch.tensor(self.scaler_y.inverse_transform(target_tensor.cpu().numpy().reshape(-1, 1)),
                                    dtype=torch.float32, device=device)
            # Loss terms
            track_loss = torch.mean((y_pred - y_target) ** 2)
            delta_loss = torch.mean((y_pred[1:] - y_pred[:-1] - (y_target[1:] - y_target[:-1])) ** 2)
            control_loss = torch.mean((u_vars[1:] - u_vars[:-1]) ** 2)
            # Integral term
            self.integral_error += (y_pred[0] - y_target[0]).item()
            integral_loss = torch.tensor(self.integral_error ** 2, device=device)
            # Combined objective
            loss = (self.Q * track_loss +
                    self.Q_delta * delta_loss +
                    self.R * control_loss +
                    self.Q_integral * integral_loss)
            loss.backward()
            optimizer.step()
            # Constraint handling
            with torch.no_grad():
                # Rate limits on the control moves
                for t in range(1, self.horizon):
                    delta_u = u_vars[t] - u_vars[t - 1]
                    delta_u = torch.clamp(delta_u, -self.du_max, self.du_max)
                    u_vars[t] = u_vars[t - 1] + delta_u
                # Box bounds
                u_vars.data = torch.clamp(u_vars, 0.1, 0.9)
            # Keep the best first move
            if loss.item() < best_loss:
                best_loss = loss.item()
                best_u = u_vars[0].detach().cpu().numpy()
        self.u_prev = best_u.copy()
        return best_u

# ------------------------------- 4. Improved target trajectory generation -------------------------------
def generate_smooth_target(length, start_temp=552.7, end_temp=553.5, max_change=0.15):
    x = np.linspace(0, 1, length)
    # Sigmoid transition between the two setpoints
    transition = 1 / (1 + np.exp(-10 * (x - 0.5)))
    raw_target = start_temp + (end_temp - start_temp) * transition
    # Cap the per-step change rate
    for i in range(1, len(raw_target)):
        delta = raw_target[i] - raw_target[i - 1]
        if abs(delta) > max_change:
            raw_target[i] = raw_target[i - 1] + np.sign(delta) * max_change
    return scaler_y.transform(raw_target.reshape(-1, 1)).flatten()

# ------------------------------- 5. Improved visualisation -------------------------------
def plot_improved_results(target_actual, preds_actual, controls_actual):
    plt.figure(figsize=(16, 8))
    # Top panel: temperature tracking
    plt.subplot(2, 1, 1)
    plt.plot(target_actual, 'r--', linewidth=2, label='Target trajectory')
    plt.plot(preds_actual, 'b-', linewidth=1.5, label='MPC prediction')
    # Error band
    error = np.abs(preds_actual.flatten() - target_actual.flatten())
    plt.fill_between(range(len(preds_actual)),
                     preds_actual.flatten() - error,
                     preds_actual.flatten() + error,
                     color='blue', alpha=0.1)
    plt.ylabel('Temperature (K)')
    plt.title('Temperature control after improvements')
    plt.legend()
    plt.grid(True, linestyle='--', alpha=0.7)
    # Bottom panel: control variables
    plt.subplot(2, 1, 2)
    for i in range(controls_actual.shape[1]):
        plt.plot(controls_actual[:, i], label=f'Control variable {i + 1}')
    plt.xlabel('Time step')
    plt.ylabel('Control input')
    plt.legend()
    plt.grid(True, linestyle='--', alpha=0.7)
    plt.tight_layout()
    plt.show()

# ------------------------------- 6. Main control simulation -------------------------------
def run_improved_mpc_simulation():
    control_cols = [3, 4, 5]  # adjust to the actual data layout
    mpc = ImprovedMPCController(model, scaler_X, scaler_y, scaler_u, control_cols)
    current_seq = X_test[0].numpy().copy()
    preds, controls = [], []
    steps = min(200, len(X_test) - mpc.horizon)
    target_seq = generate_smooth_target(steps + mpc.horizon)

    for i in tqdm(range(steps), desc="MPC progress"):
        target = target_seq[i:i + mpc.horizon]
        try:
            u_opt_norm = mpc.optimize(current_seq, target)
            pred_norm = mpc.batch_predict(current_seq, np.tile(u_opt_norm, (mpc.horizon, 1)))[0]
        except Exception as e:
            print(f"Step {i} failed: {str(e)}")
            break
        preds.append(pred_norm)
        controls.append(u_opt_norm)
        new_seq = current_seq[1:]
        new_row = current_seq[-1].copy()
        new_row[:3] = np.roll(new_row[:3], -1)
        new_row[2] = pred_norm
        new_row[control_cols] = u_opt_norm
        current_seq = np.vstack([new_seq, new_row])

    preds_actual = scaler_y.inverse_transform(np.array(preds).reshape(-1, 1))
    controls_actual = scaler_u.inverse_transform(np.array(controls))
    target_actual = scaler_y.inverse_transform(target_seq[:steps].reshape(-1, 1))
    r2 = r2_score(target_actual, preds_actual)
    rmse = np.sqrt(mean_squared_error(target_actual, preds_actual))
    print(f"\nFinal R²: {r2:.4f}, RMSE: {rmse:.4f}")
    # Improved visualisation
    plot_improved_results(target_actual, preds_actual, controls_actual)

# ------------------------------- Run -------------------------------
if __name__ == "__main__":
    run_improved_mpc_simulation()

Please modify this code so it genuinely runs and controls better.
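The main structural weakness flagged in the comments above is that batch_predict converts u_vars to numpy, so the Adam loop can only ever optimize the control-smoothness term. A minimal sketch of a differentiable rollout under the same sequence layout; the column indices and the roll/feedback scheme are taken from the original code, and treating them as correct is an assumption:

import torch

def differentiable_rollout(model, init_seq, u_vars, control_cols, horizon, device):
    """Roll the model forward `horizon` steps with torch ops only, so the
    loss built from the predictions backpropagates into u_vars."""
    seq = torch.tensor(init_seq, dtype=torch.float32, device=device)
    preds = []
    for t in range(horizon):
        step = seq.clone()
        step[-1, control_cols] = torch.clamp(u_vars[t], 0.1, 0.9)
        pred = model(step.unsqueeze(0))[0, 0]  # scalar prediction
        preds.append(pred)
        # Shift the window and feed the prediction back as the newest lag
        new_row = step[-1].clone()
        new_row[:3] = torch.roll(new_row[:3], -1)
        new_row[2] = pred
        seq = torch.cat([step[1:], new_row.unsqueeze(0)], dim=0)
    return torch.stack(preds)  # [horizon], differentiable w.r.t. u_vars

Here u_vars would be a torch tensor with requires_grad=True, and the tracking and delta losses would be built directly from this function's output instead of from re-wrapped numpy arrays.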
import numpy as np import open3d as o3d from scipy.spatial.transform import Rotation as R from enum import Enum import trimesh from sklearn.cluster import DBSCAN from scipy.interpolate import make_interp_spline import networkx as nx from rtree import index import csv import os from tqdm import tqdm # 新增进度条,提升用户体验 # OMPL兼容支持 try: from ompl import base as ob from ompl import geometric as og OMPL_AVAILABLE = True except ImportError: OMPL_AVAILABLE = False print("警告:OMPL库未安装,自动切换为优化后的简化RRT*") # -------------------------- 1. 枚举与全局配置 -------------------------- class WorkpieceMaterial(Enum): CARBON_STEEL = "碳钢" STAINLESS_STEEL = "不锈钢" ALUMINUM = "铝合金" class RobotBrand(Enum): FANUC = "发那科" ABB = "ABB" KUKA = "库卡" WELD_PROCESS_DB = { (WorkpieceMaterial.CARBON_STEEL, 3, 1.0): ((120, 150), (18, 20), (0.05, 0.08)), (WorkpieceMaterial.CARBON_STEEL, 6, 1.2): ((150, 190), (20, 24), (0.04, 0.06)), (WorkpieceMaterial.CARBON_STEEL, 10, 1.2): ((190, 230), (24, 28), (0.03, 0.05)), (WorkpieceMaterial.STAINLESS_STEEL, 3, 1.0): ((100, 130), (17, 19), (0.04, 0.07)), (WorkpieceMaterial.STAINLESS_STEEL, 6, 1.2): ((130, 170), (19, 22), (0.03, 0.05)), (WorkpieceMaterial.ALUMINUM, 3, 1.2): ((140, 180), (20, 22), (0.06, 0.09)), (WorkpieceMaterial.ALUMINUM, 6, 1.6): ((180, 220), (22, 25), (0.05, 0.07)), } # -------------------------- 2. 点云预处理(大点数加速+局部密度自适应) -------------------------- class PointCloudProcessor: def __init__(self, pcd_file_path: str, workpiece_material: WorkpieceMaterial, wire_diameter: float = 1.2): self.pcd = self.load_point_cloud(pcd_file_path) self.filtered_pcd = None self.weld_seam_points = None self.material = workpiece_material self.wire_diameter = wire_diameter self.voxel_size = self._auto_calculate_voxel_size() self.max_point_num = 10000 # 最大处理点数,超过则动态下采样 def load_point_cloud(self, file_path: str) -> o3d.geometry.PointCloud: pcd = o3d.io.read_point_cloud(file_path) if not pcd.has_points(): raise ValueError("点云文件加载失败,无有效点数据") # 动态下采样:超过max_point_num时增大voxel_size point_num = len(pcd.points) if point_num > self.max_point_num: scale_factor = np.sqrt(point_num / self.max_point_num) self.voxel_size *= scale_factor print(f"点云点数过多({point_num}),动态调整voxel_size为{self.voxel_size:.6f}") return pcd def _auto_calculate_voxel_size(self) -> float: bbox = self.pcd.get_axis_aligned_bounding_box() diag_length = np.linalg.norm(bbox.max_bound - bbox.min_bound) return diag_length / 500 def _calculate_block_local_density(self, points_block: np.ndarray, radius: float = 0.01) -> np.ndarray: """分块计算局部密度,加速大点数处理""" n_points = len(points_block) densities = np.zeros(n_points) # 对每个点,计算块内半径r内的点数(避免全局计算) for i in range(n_points): dists = np.linalg.norm(points_block - points_block[i], axis=1) densities[i] = np.sum(dists < radius) return densities def _get_local_dbscan_params(self, points: np.ndarray) -> tuple: """局部密度自适应DBSCAN参数(分块加速)""" n_points = len(points) if n_points <= 5000: # 点数少,全局计算 radius = max(self.voxel_size * 5, 0.005) densities = self._calculate_block_local_density(points, radius) else: # 点数多,分块计算(每块5000点) n_blocks = (n_points + 4999) // 5000 densities = np.zeros(n_points) radius = max(self.voxel_size * 5, 0.005) for i in tqdm(range(n_blocks), desc="分块计算局部密度"): start_idx = i * 5000 end_idx = min((i+1)*5000, n_points) block = points[start_idx:end_idx] block_densities = self._calculate_block_local_density(block, radius) densities[start_idx:end_idx] = block_densities # 动态调整k和eps local_k = np.clip(5 + densities / 2, 5, 30).astype(int) local_eps = self.voxel_size * (local_k / 2) return local_eps, local_k def 
preprocess_point_cloud(self) -> None: print("开始点云预处理...") # 第一步:下采样+全局离群点过滤 downsampled = self.pcd.voxel_down_sample(voxel_size=self.voxel_size) points = np.asarray(downsampled.points) center = np.mean(points, axis=0) relative_distances = np.linalg.norm(points - center, axis=1) max_relative_dist = np.percentile(relative_distances, 98) valid_mask = relative_distances <= max_relative_dist downsampled = downsampled.select_by_index(np.where(valid_mask)[0]) print(f"下采样后点数:{len(downsampled.points)}") # 第二步:材质适配统计滤波 if self.material == WorkpieceMaterial.ALUMINUM: self.filtered_pcd, _ = downsampled.remove_statistical_outlier(nb_neighbors=30, std_ratio=2.5) elif self.material == WorkpieceMaterial.CARBON_STEEL: self.filtered_pcd, _ = downsampled.remove_statistical_outlier(nb_neighbors=15, std_ratio=1.5) else: self.filtered_pcd, _ = downsampled.remove_statistical_outlier(nb_neighbors=20, std_ratio=2.0) print(f"统计滤波后点数:{len(self.filtered_pcd.points)}") # 第三步:局部密度自适应DBSCAN形态学滤波 self.filtered_pcd = self._morphological_filter(self.filtered_pcd) print(f"DBSCAN滤波后点数:{len(self.filtered_pcd.points)}") # 第四步:平面分割(自适应距离阈值) plane_model, inliers = self.filtered_pcd.segment_plane( distance_threshold=self.voxel_size * 2, ransac_n=3, num_iterations=1000 ) self.filtered_pcd = self.filtered_pcd.select_by_index(inliers, invert=True) print(f"平面分割后点数:{len(self.filtered_pcd.points)}") print("点云预处理完成!") def _morphological_filter(self, pcd: o3d.geometry.PointCloud) -> o3d.geometry.PointCloud: """局部密度自适应DBSCAN:大点数加速版""" points = np.asarray(pcd.points) if len(points) < 10: print("警告:点云数量过少,返回原始点云") return pcd # 获取自适应参数 local_eps, local_k = self._get_local_dbscan_params(points) global_eps = np.median(local_eps) global_k = int(np.median(local_k)) min_samples = max(global_k // 2, 3) # DBSCAN聚类 print(f"DBSCAN参数:eps={global_eps:.6f},min_samples={min_samples}") clustering = DBSCAN(eps=global_eps, min_samples=min_samples).fit(points) # 异常处理:无有效簇时重试 if len(set(clustering.labels_)) == 1 and clustering.labels_[0] == -1: print(f"警告:DBSCAN未识别有效簇,使用宽松参数重试(eps={global_eps*1.5:.6f})") clustering = DBSCAN(eps=global_eps*1.5, min_samples=2).fit(points) if len(set(clustering.labels_)) == 1 and clustering.labels_[0] == -1: print("警告:重试失败,返回原始点云") return pcd # 保留最大簇 main_cluster_idx = np.argmax(np.bincount(clustering.labels_[clustering.labels_ != -1])) main_cluster_points = points[clustering.labels_ == main_cluster_idx] pcd_out = o3d.geometry.PointCloud() pcd_out.points = o3d.utility.Vector3dVector(main_cluster_points) return pcd_out def extract_weld_seam(self, threshold: float = 0.02) -> None: if self.filtered_pcd is None: raise RuntimeError("请先执行点云预处理") print("开始提取焊缝...") # Alpha形状重建 mesh mesh, _ = o3d.geometry.TriangleMesh.create_from_point_cloud_alpha_shape(self.filtered_pcd, alpha=0.03) vertices = np.asarray(mesh.vertices) # 计算曲率,筛选焊缝点(曲率高的点为焊缝边缘) curvatures = self._calculate_curvature(mesh) seam_candidates = vertices[curvatures > threshold] # PCA+贪心最近邻排序(保证焊缝点顺序) pca_mean = np.mean(seam_candidates, axis=0) pca_cov = np.cov(seam_candidates - pca_mean, rowvar=False) eigen_values, eigen_vectors = np.linalg.eig(pca_cov) main_dir = eigen_vectors[:, np.argmax(eigen_values)] seam_proj = seam_candidates @ main_dir sorted_points = self._greedy_nearest_neighbor_sort(seam_candidates, seam_proj) self.weld_seam_points = sorted_points print(f"焊缝提取完成:{len(self.weld_seam_points)} 个路径点") @staticmethod def _calculate_curvature(mesh: o3d.geometry.TriangleMesh) -> np.ndarray: """计算顶点曲率""" normals = np.asarray(mesh.vertex_normals) vertices = 
np.asarray(mesh.vertices) curvatures = np.zeros(len(vertices)) for i in range(len(vertices)): # 找到相邻顶点 adj_vertices = [] for triangle in mesh.triangles: if i in triangle: adj_vertices.extend([v for v in triangle if v != i]) adj_vertices = list(set(adj_vertices)) if len(adj_vertices) < 3: curvatures[i] = 0 continue # 曲率=相邻法向量的方差 adj_normals = normals[adj_vertices] curvatures[i] = np.var(adj_normals) return curvatures @staticmethod def _greedy_nearest_neighbor_sort(points: np.ndarray, proj_values: np.ndarray) -> np.ndarray: """贪心最近邻排序,保证焊缝点顺序连续""" init_order = np.argsort(proj_values) sorted_points = points[init_order] visited = [False] * len(sorted_points) result = [] current_idx = 0 visited[current_idx] = True result.append(sorted_points[current_idx]) for _ in tqdm(range(len(sorted_points)-1), desc="焊缝点排序"): current_point = result[-1] min_dist = float("inf") next_idx = -1 # 局部搜索(减少计算量) search_range = min(20, len(sorted_points) - len(result)) start_idx = max(0, current_idx - search_range) end_idx = min(len(sorted_points), current_idx + search_range) for i in range(start_idx, end_idx): if not visited[i]: dist = np.linalg.norm(sorted_points[i] - current_point) if dist < min_dist: min_dist = dist next_idx = i # 全局搜索(局部未找到时) if next_idx == -1: for i in range(len(sorted_points)): if not visited[i]: dist = np.linalg.norm(sorted_points[i] - current_point) if dist < min_dist: min_dist = dist next_idx = i visited[next_idx] = True result.append(sorted_points[next_idx]) current_idx = next_idx return np.array(result) # -------------------------- 3. 路径规划(CSV均匀化+姿态导出) -------------------------- class WeldPathPlanner: def __init__(self, seam_points: np.ndarray, obstacle_meshes: list = None, wire_diameter: float = 1.2): self.seam_points = seam_points self.obstacle_meshes = obstacle_meshes or [] self.wire_diameter = wire_diameter self.tool_radius = 0.015 + wire_diameter / 1000 self.safety_margin = 0.02 self.planned_path = None self.tool_orientation = None self.local_coords = None def _construct_local_coordinate_system(self) -> None: """构建局部坐标系(x:焊缝法向,y:焊缝横向,z:焊缝方向)""" pca_mean = np.mean(self.seam_points, axis=0) pca_cov = np.cov(self.seam_points - pca_mean, rowvar=False) eigen_vectors = np.linalg.eig(pca_cov)[1] forward_dir = eigen_vectors[:, np.argmax(np.linalg.eig(pca_cov)[0])] # z轴:焊缝方向 forward_dir = forward_dir / np.linalg.norm(forward_dir) # 计算法向(x轴) normals = [] for i in range(1, len(self.seam_points)-1): vec1 = self.seam_points[i] - self.seam_points[i-1] vec2 = self.seam_points[i+1] - self.seam_points[i] normal = np.cross(vec1, vec2) normal = normal / np.linalg.norm(normal) normals.append(normal) up_dir = np.mean(normals, axis=0) if normals else np.array([0, 1, 0]) # x轴:法向 up_dir = up_dir / np.linalg.norm(up_dir) # 横向(y轴) lateral_dir = np.cross(forward_dir, up_dir) lateral_dir = lateral_dir / np.linalg.norm(lateral_dir) self.local_coords = [] for point in self.seam_points: self.local_coords.append((point, forward_dir, up_dir, lateral_dir)) def generate_smooth_path( self, offset_distance: float = 0.01, use_ompl: bool = True, max_turn_angle: float = 60.0, smooth_degree: int = 3, export_ompl_path: bool = True, export_pose_type: str = "quaternion", # 姿态导出类型:quaternion/euler/none path_step: float = 0.005 # 均匀化路径步长(米) ) -> None: self._construct_local_coordinate_system() # 生成初始偏移路径(焊接枪嘴位置) offset_points = [] for (point, _, up_dir, _) in self.local_coords: offset_point = point + up_dir * offset_distance # 沿法向偏移 offset_points.append(offset_point) offset_points = np.array(offset_points) # 路径优化 
print("开始路径规划...") if use_ompl and OMPL_AVAILABLE: self.planned_path = self._ompl_rrt_star_optimize(offset_points) if export_ompl_path: self._export_path_to_csv( self.planned_path, self.tool_orientation, filename="ompl_planned_path.csv", pose_type=export_pose_type, path_step=path_step ) else: self.planned_path = self._enhanced_rrt_star_optimize(offset_points) # 工艺约束平滑 self.planned_path = self._process_smooth_path( max_turn_angle=max_turn_angle, smooth_degree=smooth_degree ) # 路径均匀化(固定步长) self.planned_path = self._uniformize_path(self.planned_path, step=path_step) self._calculate_tool_orientation() print(f"路径规划完成:均匀化后点数={len(self.planned_path)},步长={path_step}m") def _ompl_rrt_star_optimize(self, start_goal_points: np.ndarray) -> np.ndarray: """OMPL RRT*:带收敛判断+碰撞检测""" space = ob.SE3StateSpace() bbox = self._get_path_bbox(start_goal_points) bounds = ob.RealVectorBounds(3) bounds.setLow(bbox[0]) bounds.setHigh(bbox[1]) space.setBounds(bounds) si = ob.SpaceInformation(space) si.setStateValidityChecker(ob.StateValidityCheckerFn(lambda s: self._ompl_is_state_valid(s, si))) si.setup() # 起点和目标 start = ob.State(space) start().setX(start_goal_points[0][0]) start().setY(start_goal_points[0][1]) start().setZ(start_goal_points[0][2]) goal = ob.State(space) goal().setX(start_goal_points[-1][0]) goal().setY(start_goal_points[-1][1]) goal().setZ(start_goal_points[-1][2]) # 问题定义与规划器 pdef = ob.ProblemDefinition(si) pdef.setStartAndGoalStates(start, goal, 0.05) planner = og.RRTstar(si) planner.setProblemDefinition(pdef) planner.setMaxIterations(2000) planner.setMaxPlanningTime(10.0) pdef.setOptimizationObjective(ob.PathLengthOptimizationObjective(si)) # 运行规划 solved = planner.solve(10.0) if solved: path = pdef.getSolutionPath() path.interpolate() path_np = [] for i in range(path.getStateCount()): s = path.getState(i) path_np.append([s()[0], s()[1], s()[2]]) return np.array(path_np) else: print("OMPL RRT*规划失败,切换为优化后的简化RRT*") return self._enhanced_rrt_star_optimize(start_goal_points) def _ompl_is_state_valid(self, state: ob.State, si: ob.SpaceInformation) -> bool: """OMPL状态有效性检查(碰撞检测)""" x = state()[0] y = state()[1] z = state()[2] tool_sphere = trimesh.creation.sphere(radius=self.tool_radius + self.safety_margin, center=[x, y, z]) for obs_mesh in self.obstacle_meshes: if trimesh.collision.collision_pair(tool_sphere, obs_mesh)[0]: return False return si.satisfiesBounds(state) def _enhanced_rrt_star_optimize(self, start_goal_points: np.ndarray) -> np.ndarray: """优化简化RRT*:收敛判断+路径重连""" collision_meshes = [obs for obs in self.obstacle_meshes if isinstance(obs, trimesh.Trimesh)] class Node: def __init__(self, point): self.point = point self.parent = None self.cost = 0.0 nodes = [Node(start_goal_points[0])] space_idx = index.Index() space_idx.insert(0, self._point_to_bbox(start_goal_points[0])) bbox = self._get_path_bbox(start_goal_points) max_iter = 2000 goal_point = start_goal_points[-1] goal_radius = 0.05 convergence_iter = 0 best_cost = float("inf") for _ in tqdm(range(max_iter), desc="简化RRT*规划"): # 采样策略 sample_point = self._random_sample(bbox) if np.random.random() < 0.8 else goal_point # 最近节点查找 nearest_idx = list(space_idx.nearest(self._point_to_bbox(sample_point), 1))[0] nearest_node = nodes[nearest_idx] # 新节点生成(自适应步长) direction = sample_point - nearest_node.point dir_norm = np.linalg.norm(direction) if dir_norm > 0: direction /= dir_norm step_size = 0.01 if self._is_obstacle_dense(nearest_node.point, collision_meshes) else 0.02 new_point = nearest_node.point + direction * step_size # 碰撞检测 if 
    def _process_smooth_path(self, max_turn_angle: float = 60.0,
                             smooth_degree: int = 3) -> np.ndarray:
        """Process smoothing with configurable turn angle and spline degree."""
        if len(self.planned_path) < 3:
            return self.planned_path

        # Drop points that create turns sharper than max_turn_angle
        max_angle_rad = np.radians(max_turn_angle)
        smooth_path = [self.planned_path[0]]
        for i in range(1, len(self.planned_path) - 1):
            vec1 = self.planned_path[i] - self.planned_path[i - 1]
            vec2 = self.planned_path[i + 1] - self.planned_path[i]
            n1, n2 = np.linalg.norm(vec1), np.linalg.norm(vec2)
            if n1 < 1e-12 or n2 < 1e-12:
                continue  # skip duplicate points to avoid division by zero
            angle = np.arccos(np.clip(np.dot(vec1 / n1, vec2 / n2), -1.0, 1.0))
            if angle <= max_angle_rad:
                smooth_path.append(self.planned_path[i])
        smooth_path.append(self.planned_path[-1])

        # B-spline smoothing
        smooth_degree = int(np.clip(smooth_degree, 1, 5))
        if len(smooth_path) <= smooth_degree:
            smooth_degree = len(smooth_path) - 1
        t = np.linspace(0, 1, len(smooth_path))
        spline_x = make_interp_spline(t, [p[0] for p in smooth_path], k=smooth_degree)
        spline_y = make_interp_spline(t, [p[1] for p in smooth_path], k=smooth_degree)
        spline_z = make_interp_spline(t, [p[2] for p in smooth_path], k=smooth_degree)
        t_smooth = np.linspace(0, 1, len(smooth_path) * (smooth_degree + 1))
        return np.column_stack([spline_x(t_smooth),
                                spline_y(t_smooth),
                                spline_z(t_smooth)])

    def _uniformize_path(self, path: np.ndarray, step: float = 0.005) -> np.ndarray:
        """Resample the path at a fixed arc-length step."""
        if len(path) < 2:
            return path

        # Cumulative arc length along the path
        dists = np.linalg.norm(np.diff(path, axis=0), axis=1)
        total_length = np.sum(dists)
        cum_dists = np.insert(np.cumsum(dists), 0, 0)

        # Target arc-length values, one every `step` metres (endpoint included)
        uniform_dists = np.arange(0, total_length, step)
        if uniform_dists[-1] < total_length - 1e-6:
            uniform_dists = np.append(uniform_dists, total_length)

        # Linear interpolation inside the segment containing each target
        uniform_path = []
        for d in uniform_dists:
            idx = np.searchsorted(cum_dists, d)
            if idx == 0:
                uniform_path.append(path[0])
            elif idx >= len(cum_dists):
                uniform_path.append(path[-1])
            else:
                t = (d - cum_dists[idx - 1]) / (cum_dists[idx] - cum_dists[idx - 1])
                uniform_path.append((1 - t) * path[idx - 1] + t * path[idx])
        return np.array(uniform_path)
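One caveat worth noting: _export_path_to_csv interpolates quaternions component-wise with np.interp, which is acceptable for densely sampled, slowly varying poses but does not keep the results on the unit sphere. Since scipy.spatial.transform is already imported for R, spherical linear interpolation (Slerp) is a natural drop-in alternative; a minimal sketch under that assumption:

import numpy as np
from scipy.spatial.transform import Rotation, Slerp

# Two key orientations: identity and a 90-degree yaw, as (x, y, z, w) quaternions.
key_quats = np.array([[0.0, 0.0, 0.0, 1.0],
                      [0.0, 0.0, np.sin(np.pi / 4), np.cos(np.pi / 4)]])
slerp = Slerp([0.0, 1.0], Rotation.from_quat(key_quats))

# Resample at the uniformized parameter values; outputs stay unit-norm.
t_uniform = np.linspace(0.0, 1.0, 5)
uniform_quats = slerp(t_uniform).as_quat()
print(np.linalg.norm(uniform_quats, axis=1))  # all ~1.0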
    def _export_path_to_csv(
        self,
        path: np.ndarray,
        orientation: np.ndarray,
        filename: str = "planned_path.csv",
        pose_type: str = "quaternion",
        path_step: float = 0.005
    ) -> None:
        """Export the uniformized path plus pose information to CSV."""
        # Match the number of orientation samples to the number of path points
        if len(orientation) != len(path):
            t_path = np.linspace(0, 1, len(path))
            t_orient = np.linspace(0, 1, len(orientation))
            orientation = np.array([
                np.interp(t_path, t_orient, orientation[:, i]) for i in range(4)
            ]).T

        # Uniformize the path, then interpolate the poses to match
        uniform_path = self._uniformize_path(path, step=path_step)
        t_uniform = np.linspace(0, 1, len(uniform_path))
        t_original = np.linspace(0, 1, len(orientation))
        uniform_orient = np.array([
            np.interp(t_uniform, t_original, orientation[:, i]) for i in range(4)
        ]).T

        # Write the CSV into exported_paths/
        os.makedirs("exported_paths", exist_ok=True)
        filepath = os.path.join("exported_paths", filename)
        with open(filepath, "w", newline="", encoding="utf-8") as f:
            writer = csv.writer(f)
            if pose_type == "quaternion":
                writer.writerow(["X", "Y", "Z", "QX", "QY", "QZ", "QW"])
                for p, o in zip(uniform_path, uniform_orient):
                    writer.writerow([p[0], p[1], p[2], o[0], o[1], o[2], o[3]])
            elif pose_type == "euler":
                writer.writerow(["X", "Y", "Z", "Roll", "Pitch", "Yaw"])
                for p, o in zip(uniform_path, uniform_orient):
                    # Quaternion -> Euler angles (XYZ order, radians)
                    euler = R.from_quat(o).as_euler('xyz')
                    writer.writerow([p[0], p[1], p[2],
                                     euler[0], euler[1], euler[2]])
            else:
                writer.writerow(["X", "Y", "Z"])
                writer.writerows(uniform_path)
        print(f"Uniformized path exported to {filepath} (step={path_step}m)")

    def _calculate_tool_orientation(self) -> None:
        """Compute the tool pose (quaternions) from the local weld frames."""
        self.tool_orientation = []
        for (point, forward_dir, up_dir, lateral_dir) in self.local_coords:
            # Rotation matrix columns: x = normal, y = lateral, z = weld direction
            rot_matrix = np.column_stack([up_dir, lateral_dir, forward_dir])
            # Rotation matrix -> quaternion (x, y, z, w)
            self.tool_orientation.append(R.from_matrix(rot_matrix).as_quat())
        self.tool_orientation = np.array(self.tool_orientation)

    # Helper functions
    def _get_path_bbox(self, points: np.ndarray) -> tuple:
        min_bound = np.min(points, axis=0) - 0.1
        max_bound = np.max(points, axis=0) + 0.1
        return (min_bound, max_bound)

    def _random_sample(self, bbox: tuple) -> np.ndarray:
        return np.random.uniform(bbox[0], bbox[1])

    def _point_to_bbox(self, point: np.ndarray, radius: float = 0.01) -> tuple:
        return (point[0] - radius, point[1] - radius, point[2] - radius,
                point[0] + radius, point[1] + radius, point[2] + radius)

    def _sphere_collides(self, center: np.ndarray, radius: float,
                         collision_meshes: list) -> bool:
        """True if a sphere of `radius` at `center` hits any obstacle mesh.
        Uses trimesh's CollisionManager (requires the python-fcl backend)."""
        tool_sphere = trimesh.primitives.Sphere(radius=radius, center=center)
        manager = trimesh.collision.CollisionManager()
        for i, obs_mesh in enumerate(collision_meshes):
            manager.add_object(f"obs_{i}", obs_mesh)
        return manager.in_collision_single(tool_sphere)

    def _check_segment_collision(self, p1: np.ndarray, p2: np.ndarray,
                                 collision_meshes: list) -> bool:
        """Segment collision check: sample the segment every 0.5 cm."""
        num_samples = int(np.linalg.norm(p2 - p1) / 0.005) + 1
        samples = np.linspace(p1, p2, num_samples)
        radius = self.tool_radius + self.safety_margin
        for sample in samples:
            if self._sphere_collides(sample, radius, collision_meshes):
                return True
        return False

    def _is_obstacle_dense(self, point: np.ndarray, collision_meshes: list) -> bool:
        """Obstacle-density probe: inflate the tool sphere by 5 cm."""
        return self._sphere_collides(
            point, self.tool_radius + self.safety_margin + 0.05, collision_meshes)


# -------------------------- 4. Weld program generation (uniformized-path compatible) --------------------------
class WeldProgramGenerator:
    def __init__(
        self,
        path: np.ndarray,
        orientation: np.ndarray,
        robot_brand: RobotBrand,
        workpiece_material: WorkpieceMaterial,
        plate_thickness: float = 5.0,
        wire_diameter: float = 1.2
    ):
        self.path = path
        self.orientation = orientation
        self.robot_brand = robot_brand
        self.material = workpiece_material
        self.plate_thickness = plate_thickness
        self.wire_diameter = wire_diameter
        self.weld_params = self._match_weld_params()
        self.program = []

    def _match_weld_params(self) -> dict:
        """Match welding process parameters from the process database."""
        key = (self.material, self.plate_thickness, self.wire_diameter)
        if key not in WELD_PROCESS_DB:
            similar_keys = [k for k in WELD_PROCESS_DB.keys()
                            if k[0] == self.material and k[2] == self.wire_diameter]
            if not similar_keys:
                raise ValueError(
                    f"No matching process parameters: {self.material.value}, "
                    f"{self.plate_thickness}mm, {self.wire_diameter}mm")
            similar_keys.sort(key=lambda x: abs(x[1] - self.plate_thickness))
            key = similar_keys[0]
            print(f"Warning: no exact match; using nearest parameters "
                  f"(plate thickness {key[1]}mm)")
        current_range, voltage_range, speed_range = WELD_PROCESS_DB[key]
        return {
            "current": (current_range[0] + current_range[1]) / 2,
            "voltage": (voltage_range[0] + voltage_range[1]) / 2,
            "speed": (speed_range[0] + speed_range[1]) / 2
        }

    def generate_program(self, program_name: str = "WELD_PROG01") -> tuple:
        """Generate the robot welding program for the selected brand."""
        if self.robot_brand == RobotBrand.FANUC:
            robot_program = self._generate_fanuc_program(program_name)
        elif self.robot_brand == RobotBrand.ABB:
            robot_program = self._generate_abb_program(program_name)
        elif self.robot_brand == RobotBrand.KUKA:
            robot_program = self._generate_kuka_program(program_name)
        else:
            raise ValueError(f"Unsupported robot brand: {self.robot_brand.value}")
        tat_file = self._generate_tat_file(program_name)
        return robot_program, tat_file

    def _generate_fanuc_program(self, program_name: str) -> str:
        """Generate a Fanuc TP program."""
        self.program.clear()
        self.program.append(f"/PROG {program_name}")
        self.program.append("/ATTR")
        self.program.append("OWNER = MNEDITOR;")
        self.program.append(
            f"COMMENT = \"{self.material.value} {self.plate_thickness}mm automatic "
            f"weld program (wire {self.wire_diameter}mm)\";")
        self.program.append("PROTECT = READ_WRITE;")
        self.program.append("/MN")
        self.program.append("  UNITS MM; ! units declared as millimetres")
        self.program.append(
            f"  CALL WELD_INIT({self.weld_params['current']:.0f}, "
            f"{self.weld_params['voltage']:.1f});")
        self.program.append(f"  SPEED {self.weld_params['speed']:.3f};")
        self.program.append("  ACC 1.0;")
        self.program.append("  FINE;")

        point_idx = 1
        # Match the number of orientation samples to the number of path points
        if len(self.orientation) != len(self.path):
            t_path = np.linspace(0, 1, len(self.path))
            t_orient = np.linspace(0, 1, len(self.orientation))
            self.orientation = np.array([
                np.interp(t_path, t_orient, self.orientation[:, i])
                for i in range(4)
            ]).T
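For orientation, a minimal usage sketch of the generator (hypothetical inputs; it assumes the RobotBrand / WorkpieceMaterial enums and WELD_PROCESS_DB defined earlier in this post, a WorkpieceMaterial member named Q235 that may differ from your definition, and a _generate_fanuc_program completed past the truncation above):

import numpy as np

# Hypothetical straight 10 cm path with identity tool orientations (x, y, z, w).
path = np.array([[0.0, 0.0, 0.0], [0.05, 0.0, 0.0], [0.10, 0.0, 0.0]])
orientation = np.tile([0.0, 0.0, 0.0, 1.0], (len(path), 1))

gen = WeldProgramGenerator(
    path,
    orientation,
    robot_brand=RobotBrand.FANUC,
    workpiece_material=WorkpieceMaterial.Q235,  # hypothetical enum member
    plate_thickness=5.0,
    wire_diameter=1.2,
)
robot_program, tat_file = gen.generate_program("WELD_PROG01")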