itertools module 之 combinations 用法

本文介绍如何使用Python的itertools库中的combinations函数解决组合总和问题,通过示例展示如何找出从1到9中选取k个数的所有可能组合,使这些数的和等于目标数n。

摘要生成于 C知道 ,由 DeepSeek-R1 满血版支持, 前往体验 >

itertools module 中 combinations 的功能:

from itertools import combinations
print(list(combinations(seq, r)))
将打印 seq 中所有长度为 r 的组合(子集)的集合。(注意:不要把序列命名为 list,否则会遮蔽内置的 list 函数)

e.g.
from itertools import combinations
nums = [1, 2, 3]
print(list(combinations(nums, 2)))
>>>
[(1, 2), (1, 3), (2, 3)]

所以当遇到排列组合的问题时,combinations会很好用


leetcode例题:

216. Combination Sum III

Find all possible combinations of k numbers that add up to a number n, given that only numbers from 1 to 9 can be used and each combination should be a unique set of numbers.

Note:

  • All numbers will be positive integers.
  • The solution set must not contain duplicate combinations.

Example 1:

Input: k = 3, n = 7
Output: [[1,2,4]]

Example 2:

Input: k = 3, n = 9
Output: [[1,2,6], [1,3,5], [2,3,4]]

思路:利用 combinations 列出所有长度为 k 的组合,再筛选出和为 n 的组合
from itertools import combinations
class Solution(object):
    def combinationSum3(self, k, n):
        """Return all combinations of k distinct numbers from 1..9 that sum to n.

        Args:
            k (int): how many numbers each combination must contain.
            n (int): the target sum.

        Returns:
            list[tuple[int, ...]]: every strictly increasing k-tuple drawn
            from 1..9 whose elements add up to n. Empty list if none exist.
        """
        # combinations() already yields each k-subset exactly once, in sorted
        # order, so filtering by the target sum is all that is needed.
        return [combo for combo in combinations(range(1, 10), k)
                if sum(combo) == n]

详细点的写法:
from itertools import combinations
class Solution(object):
    def combinationSum3(self, k, n):
        """Return all combinations of k distinct numbers from 1..9 that sum to n.

        Explicit-loop version of the same idea: enumerate every k-subset of
        1..9 with itertools.combinations and keep those whose sum equals n.

        Args:
            k (int): how many numbers each combination must contain.
            n (int): the target sum.

        Returns:
            list[tuple[int, ...]]: the matching combinations, each a sorted
            k-tuple; empty list if none exist.
        """
        res = []
        # Each candidate is already unique and sorted, so no dedup is needed.
        for combo in combinations(range(1, 10), k):
            if sum(combo) == n:
                res.append(combo)
        return res










转载于:https://www.cnblogs.com/phinza/p/10231257.html

# NOTE(review): script pasted in the blog comment thread (original was
# collapsed onto a single line, with a request for a corrected version);
# reformatted below with the logic unchanged.
import itertools

import numpy as np


def read_lammps_dump(file_path):
    """Read a LAMMPS dump file; return (coordinates, box volume).

    Parses the first 'ITEM: NUMBER OF ATOMS', 'ITEM: BOX BOUNDS' and
    'ITEM: ATOMS' sections it encounters. The last three columns of each
    atom line are taken as x, y, z.
    # assumes a single-snapshot dump with those three ITEM headers present
    # before the atom table — TODO confirm for multi-frame dumps.
    """
    with open(file_path, 'r') as f:
        lines = f.readlines()
    for i, line in enumerate(lines):
        if 'ITEM: NUMBER OF ATOMS' in line:
            num_atoms = int(lines[i + 1].strip())
        elif 'ITEM: BOX BOUNDS' in line:
            # Three lines of "lo hi" bounds follow, one per axis.
            box_bounds = np.array(
                [list(map(float, lines[i + j + 1].split())) for j in range(3)])
            box_size = box_bounds[:, 1] - box_bounds[:, 0]
            volume = np.prod(box_size)
        elif 'ITEM: ATOMS' in line:
            start_index = i + 1
            break
    coordinates = np.zeros((num_atoms, 3))
    for i in range(num_atoms):
        data = lines[start_index + i].split()
        # Last three columns are x, y, z regardless of preceding columns.
        coordinates[i] = [float(data[-3]), float(data[-2]), float(data[-1])]
    return coordinates, volume


def compute_rdf(coordinates, volume, num_bins=100, r_max=10.0):
    """Compute the radial distribution function g(r).

    Args:
        coordinates: (N, 3) array of atom positions.
        volume: simulation box volume used for the ideal-gas normalization.
        num_bins: number of histogram bins over [0, r_max).
        r_max: maximum pair distance considered.

    Returns:
        (r, rdf): bin-center radii and the normalized g(r) values.

    NOTE(review): distances are plain Euclidean — no periodic-boundary
    minimum-image convention is applied; confirm that is intended.
    """
    num_atoms = len(coordinates)
    dr = r_max / num_bins
    rdf = np.zeros(num_bins)
    # Histogram all unique pairs; +2 counts each pair from both atoms' view.
    for i, j in itertools.combinations(range(num_atoms), 2):
        dist = np.linalg.norm(coordinates[i] - coordinates[j])
        bin_index = int(dist / dr)
        if bin_index < num_bins:
            rdf[bin_index] += 2
    # Ideal-gas reference count per bin, evaluated at bin centers.
    r = np.linspace(0, r_max, num_bins, endpoint=False) + dr / 2
    n_ideal = 4 * np.pi * r**2 * dr * (num_atoms / volume)
    # Normalize: per-atom pair count divided by the ideal-gas expectation.
    rdf = rdf / (n_ideal * num_atoms)
    return r, rdf


if __name__ == '__main__':
    # Read the LAMMPS dump file.
    coordinates, volume = read_lammps_dump('Cu_npt.xyz')
    # Compute the RDF (num_bins=100, matching the call — the original
    # comment claimed 202, which disagreed with the code).
    r, g_r = compute_rdf(coordinates, volume, num_bins=100, r_max=10.0)
    # Save results to file.
    with open('rdf_temp_py.data', 'w') as f:
        f.write('# 分区 半径r 径向分布函数值\n')
        for i in range(len(r)):
            f.write(f'{i + 1} {r[i]} {g_r[i]}\n')
    print("RDF 计算完成,结果已保存到 rdf_temp_py.data 文件中。")
04-04
你是一个程序员,现在请你看看以下代码逻辑上有什么问题:import os import random import numpy as np import torch import torch.nn as nn import torch.optim as optim import torch.nn.functional as F from torch.utils.tensorboard import SummaryWriter from torchvision import transforms from torch.utils.data import Dataset, DataLoader from PIL import Image import matplotlib.pyplot as plt import itertools # ================= 配置参数 ================= TRAIN_ROOT = "MRI_END/train" TEST_ROOT = "MRI_END/test" MODALITIES = ['PD', 'T1', 'T2'] MODALITY_STATS = { 'PD': {'mean': [0.1138], 'std': [0.1147]}, 'T1': {'mean': [0.1632], 'std': [0.1887]}, 'T2': {'mean': [0.1082], 'std': [0.1121]} } # ================= 数据预处理 ================= class MedicalTransform: def __init__(self, phase='train'): if phase == 'train': self.transform = transforms.Compose([ transforms.Resize((128, 128)), transforms.RandomHorizontalFlip(p=0.5), transforms.RandomRotation(30), transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2), transforms.ToTensor(), transforms.RandomErasing(p=0.3, scale=(0.02, 0.1)),# 模拟遮挡 transforms.Normalize(mean=stats['mean'], std=stats['std']) ]) else: self.transform = transforms.Compose([ transforms.Resize(128 + 32), transforms.CenterCrop(128), transforms.ToTensor(), ]) def __call__(self, img, modality): img = self.transform(img) stats = MODALITY_STATS[modality] return transforms.Normalize(mean=stats['mean'], std=stats['std'])(img) # ================= 数据集类 ================= class KneeMRIDataset(Dataset): def __init__(self, base_path, phase='train'): self.phase = phase self.samples = [] self.mod_combinations = [] self.transform = MedicalTransform(phase) for age_folder in os.listdir(base_path): age_path = os.path.join(base_path, age_folder) if not os.path.isdir(age_path): continue for subject_folder in os.listdir(age_path): subject_path = os.path.join(age_path, subject_folder) if not os.path.isdir(subject_path): continue parts = subject_folder.split('_', 1) gender = 0 if parts[0] == 'M' else 1 age_val = 
int(age_folder.split('_')[1]) mod_files = {} for fname in os.listdir(subject_path): mod_prefix = fname.split('_')[0].upper() if mod_prefix in MODALITIES: mod_files[mod_prefix] = os.path.join(subject_path, fname) if len(mod_files) >= 2: valid_combs = list(itertools.combinations(mod_files.keys(), 2))#生成所有模态组合 self.samples.append({ 'age': age_val, 'gender': gender,#性别 'mod_files': mod_files, 'valid_combs': valid_combs#可用模态 }) self.mod_combinations.extend(valid_combs) #排序去重转化为列表 self.mod_combinations = list(set(tuple(sorted(c)) for c in self.mod_combinations)) def __len__(self): return len(self.samples) def __getitem__(self, idx): sample = self.samples[idx] if self.phase == 'train': selected_mods = random.choice(sample['valid_combs'])#随机选取一组模态 # 构建模态字典 modality_images = {} for mod in selected_mods: img = Image.open(sample['mod_files'][mod]).convert('L') img = self.transform(img, mod) modality_images[mod] = img return { 'modality_images': modality_images, 'available_modes': selected_mods, 'age': sample['age'], 'gender': sample['gender'], 'mod_comb': self.mod_combinations.index(tuple(sorted(selected_mods))) } # ================= 新特征融合模块 ================= class ModalitySelector(nn.Module): def __init__(self): super().__init__() # 共享编码器结构(适用于所有模态) self.encoder = nn.Sequential( nn.Conv2d(1, 64, 3, padding=1), # 单通道输入(灰度医学影像) nn.ReLU(), nn.MaxPool2d(2), # 空间下采样 nn.Conv2d(64, 128, 3, padding=1), nn.ReLU(), nn.AdaptiveAvgPool2d((16, 16))#确保不同分辨率的输入输出统一尺寸 ) def forward(self, input_dict, available_modes): features = {}#available_modes 参数允许运行时灵活选择模态组合 for mode in available_modes: # 所有模态共享相同的编码器 features[mode] = self.encoder(input_dict[mode])# 共享编码器 return features class DualAttentionFusion(nn.Module): def __init__(self, in_channels = 128): super().__init__() # 自注意力:提取模态内特征 self.self_attn = nn.MultiheadAttention(in_channels, num_heads=4) # 跨模态注意力:建立模态间关联 self.cross_attn = nn.MultiheadAttention(in_channels, num_heads=4) # 特征压缩层 self.compression = nn.Sequential( 
nn.Linear(in_channels, in_channels//2), nn.ReLU() ) def forward(self, feat1, feat2): # 展平空间维度 (B, C, H, W) -> (B, H*W, C) B, C, H, W = feat1.size() feat1 = feat1.view(B, C, -1).permute(2, 0, 1) # (L, B, C) feat2 = feat2.view(B, C, -1).permute(2, 0, 1) # 自注意力增强 feat1_attn, _ = self.self_attn(feat1, feat1, feat1) feat2_attn, _ = self.self_attn(feat2, feat2, feat2) # 跨模态交互以feat1为Query,feat2为Key/Value) fused, _ = self.cross_attn(feat1_attn, feat2_attn, feat2_attn) # 压缩特征维度 fused = fused.permute(1, 2, 0) # (B, C, L) fused = fused.mean(dim=-1) # (B, C) return self.compression(fused) class GenderFusion(nn.Module): def __init__(self, img_feat_dim, gender_dim=32): super().__init__() # 性别嵌入层 self.gender_emb = nn.Embedding(2, gender_dim) # 门控注意力机制 self.attn_gate = nn.Sequential( nn.Linear(img_feat_dim + gender_dim, 128), nn.ReLU(), nn.Linear(128, img_feat_dim), nn.Sigmoid() ) # 特征调制 self.feature_modulator = nn.Sequential( nn.Conv1d(img_feat_dim, img_feat_dim//2, 1), nn.ReLU(), nn.Conv1d(img_feat_dim//2, img_feat_dim, 1), nn.Sigmoid() ) def forward(self, img_feat, gender_labels): # 性别嵌入 gender_emb = self.gender_emb(gender_labels) # 特征调制 spatial_weights = self.feature_modulator(img_feat.unsqueeze(2)).squeeze(2) modulated_feat = img_feat * spatial_weights # 注意力门控 fused = torch.cat([modulated_feat, gender_emb], dim=1) attn_weights = self.attn_gate(fused) weighted_feat = modulated_feat * attn_weights return weighted_feat class EnhancedKneeAgePredictor(nn.Module): def __init__(self, num_combinations): super().__init__() # 创建模型但不加载预训练权重 self.feature_extractor = timm.create_model( 'resnet18', pretrained=True, in_chans=1 ) num_features = feature_extractor.fc.in_features feature_extractor.fc = nn.Linear(num_features,1) # 模态选择器 self.mod_selector = ModalitySelector() # 双模态注意力融合 self.dual_attn_fusion = DualAttentionFusion(128) # 性别融合 self.gender_fusion = GenderFusion(64, 32) # 分类头 self.classifier = nn.Sequential( nn.Linear(64, 32), nn.ReLU(), nn.Linear(32, 1) ) # 嵌入层 self.mod_emb = 
nn.Embedding(num_combinations, 64) # 模态组合映射 self.mod_mapper = nn.Embedding(len(MODALITIES), 128) def forward(self, input_dict, gender, mod_comb, available_modes): # 特征提取 features = self.mod_selector(input_dict, available_modes) # 获取模态特征 mod_keys = list(features.keys()) feat1 = features[mod_keys[0]] feat2 = features[mod_keys[1]] # 模态映射 mod1_bias = self.mod_mapper(torch.tensor(MODALITIES.index(mod_keys[0]), device=feat1.device)) mod2_bias = self.mod_mapper(torch.tensor(MODALITIES.index(mod_keys[1]), device=feat1.device)) feat1 = feat1 + mod1_bias.view(1, -1, 1, 1) feat2 = feat2 + mod2_bias.view(1, -1, 1, 1) # 双模态融合 fused = self.dual_attn_fusion(feat1, feat2) # 性别融合 gender_fused = self.gender_fusion(fused, gender) # 模态组合嵌入 mod_bias = self.mod_emb(mod_comb) final_feat = gender_fused + mod_bias return self.classifier(final_feat).squeeze(1) # ================= 训练流程 - 已修改 ================= # 数据集初始化 train_val_dataset = KneeMRIDataset(TRAIN_ROOT, phase='train') num_combinations = len(train_val_dataset.mod_combinations) # 数据拆分 train_size = int(0.8 * len(train_val_dataset)) train_set, val_set = random_split( train_val_dataset, [train_size, len(train_val_dataset)-train_size], generator=torch.Generator().manual_seed(42) ) test_dataset = KneeMRIDataset(TEST_ROOT, phase='test') # 数据加载器配置 train_loader = DataLoader(train_set, batch_size=64, shuffle=True,persistent_workers=True, num_workers=4, pin_memory=True) val_loader = DataLoader(val_set, batch_size=64, num_workers=2, pin_memory=True) test_loader = DataLoader(test_dataset, batch_size=64, num_workers=2, pin_memory=True) # 设备选择 device = torch.device("cuda" if torch.cuda.is_available() else "cpu") model =EnhancedKneeAgePredictor(num_combinations).to(device) optimizer = optim.AdamW(model.parameters(), lr=0.0001, weight_decay=0.05) criterion = nn.HuberLoss() # 训练日志初始化 training_log = { 'train_loss': [], 'val_loss': [], 'test_loss': [], 'train_acc': [], 'val_acc': [], 'test_acc': [], 'best_acc': 0.0 } # 主训练循环 for epoch in range(60): 
train_set.dataset.set_epoch(epoch) # 训练阶段 model.train() epoch_loss, correct_preds = 0.0, 0 for batch in train_loader: inputs = batch['modality_images'].to(device) labels = batch['age'].float().to(device) # 保持浮点型 genders = batch['gender'].to(device) mod_combs = batch['mod_comb'].to(device) optimizer.zero_grad() outputs = model(inputs, genders, mod_combs) loss = criterion(outputs, labels) loss.backward() optimizer.step() # 回归任务准确率计算(误差±1岁内视为正确)[5](@ref) pred_ages = outputs.squeeze() correct = torch.abs(pred_ages - labels) < 1.0 correct_preds += correct.sum().item() epoch_loss += loss.item() * len(labels) # 用len(labels)替代inputs.size(0) # 验证阶段 model.eval() val_loss, val_correct = 0.0, 0 with torch.no_grad(): torch.manual_seed(42) for batch in val_loader: inputs = batch['images'].to(device) labels = batch['age'].float().to(device) # 保持浮点型 genders = batch['gender'].to(device) mod_combs = batch['mod_comb'].to(device) outputs = model(inputs, genders, mod_combs) loss = criterion(outputs, labels) val_loss += loss.item() * inputs.size(0) pred_ages = outputs.squeeze() correct = torch.abs(pred_ages - labels) < 1.0 val_correct += correct.sum().item() # 测试阶段 test_loss, test_correct = 0.0, 0 with torch.no_grad(): for batch in test_loader: inputs = batch['images'].to(device) labels = batch['age'].float().to(device) # 保持浮点型 genders = batch['gender'].to(device) mod_combs = batch['mod_comb'].to(device) outputs = model(inputs, genders, mod_combs) loss = criterion(outputs, labels) test_loss += loss.item() * inputs.size(0) pred_ages = outputs.squeeze() correct = torch.abs(pred_ages - labels) < 1.0 val_correct += correct.sum().item() # 记录指标 train_loss = epoch_loss / len(train_set) train_acc = correct_preds / len(train_set) val_loss = val_loss / len(val_set) val_acc = val_correct / len(val_set) test_loss = test_loss / len(test_dataset) test_acc = test_correct / len(test_dataset) training_log['train_loss'].append(train_loss) training_log['train_acc'].append(train_acc) 
training_log['val_loss'].append(val_loss) training_log['val_acc'].append(val_acc) training_log['test_loss'].append(test_loss) training_log['test_acc'].append(test_acc) # 模型保存逻辑 if val_acc > training_log['best_acc']: training_log['best_acc'] = val_acc torch.save({ 'epoch': epoch, 'model_state_dict': model.state_dict(), 'optimizer_state_dict': optimizer.state_dict(), 'loss': train_loss, 'accuracy': val_acc }, 'best_age_predictor.pth') # 训练进度输出 print(f'Epoch [{epoch+1:02d}/{TOTAL_EPOCHS}]') print(f'Train Loss: {train_loss:.4f} | Acc: {train_acc:.4f}') print(f'Val Loss: {val_loss:.4f} | Acc: {val_acc:.4f}') print(f'Test Loss: {test_loss:.4f} | Acc: {test_acc:.4f}\n') # 可视化训练过程 plt.figure(figsize=(15, 6)) plt.subplot(1, 2, 1) plt.plot(training_log['train_loss'], label='Train', linestyle='--') plt.plot(training_log['val_loss'], label='Val', linestyle='-.') plt.plot(training_log['test_loss'], label='Test', linestyle='-') plt.title('Loss Trajectory'), plt.xlabel('Epoch'), plt.ylabel('Loss'), plt.legend() plt.subplot(1, 2, 2) plt.plot(training_log['train_acc'], label='Train', linestyle='--') plt.plot(training_log['val_acc'], label='Val', linestyle='-.') plt.plot(training_log['test_acc'], label='Test', linestyle='-') plt.title('Accuracy Progress'), plt.xlabel('Epoch'), plt.ylabel('Accuracy'), plt.legend() plt.tight_layout() plt.savefig('training_metrics.png') plt.show()
最新发布
07-21
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值