416. Partition Equal Subset Sum

This post looks at how to decide whether an array of positive integers can be split into two subsets with equal sums. Two approaches are covered: depth-first search (DFS) and dynamic programming (DP). The final solution uses DP, building a 2-D boolean table.

Given a non-empty array containing only positive integers, find if the array can be partitioned into two subsets such that the sum of elements in both subsets is equal.

Note:

  1. Each of the array elements will not exceed 100.
  2. The array size will not exceed 200.

 

Example 1:

Input: [1, 5, 11, 5]

Output: true

Explanation: The array can be partitioned as [1, 5, 5] and [11].

 

Example 2:

Input: [1, 2, 3, 5]

Output: false

Explanation: The array cannot be partitioned into equal sum subsets.

This is a really nice problem, and I thought of two approaches. The first is DFS: if the current sum equals the target sum, return True; otherwise, for i in range(len(nums)), pick nums[i], add it to the current sum, shrink nums to nums[:i] + nums[i+1:], and recurse.

Unfortunately, that approach exceeds the time limit.
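For reference, here is a minimal sketch of that DFS as described above (the SolutionDFS class name is just for illustration; the odd-sum early exit is the same one used in the DP version below). It is correct but exponential, which is why it times out on larger inputs:

class SolutionDFS(object):
    def canPartition(self, nums):
        total = sum(nums)
        if total % 2:
            return False
        target = total // 2

        def dfs(remaining, cur):
            # Found a subset whose sum is exactly half of the total.
            if cur == target:
                return True
            if cur > target:
                return False
            # Try every remaining number as the next pick.
            for i in range(len(remaining)):
                if dfs(remaining[:i] + remaining[i + 1:], cur + remaining[i]):
                    return True
            return False

        return dfs(nums, 0)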

So let's go straight to DP. This problem is really a variant of the classic subset sum problem.

Chart: the columns are the amount, i.e., the target sum, and the rows are the numbers in nums; the DP transition rule is spelled out in the code below. That's it~
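As an illustration, this is what the table looks like for Example 1 ([1, 5, 11, 5], target sum 11); T marks a reachable sum, . an unreachable one:

c:       0  1  2  3  4  5  6  7  8  9 10 11
num=1:   T  T  .  .  .  .  .  .  .  .  .  .
num=5:   T  T  .  .  .  T  T  .  .  .  .  .
num=11:  T  T  .  .  .  T  T  .  .  .  .  T
num=5:   T  T  .  .  .  T  T  .  .  .  T  T

The bottom-right cell is True, so the answer is true.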

class Solution(object):
    def canPartition(self, nums):
        """
        :type nums: List[int]
        :rtype: bool
        """
        totalsum = sum(nums)
        # An odd total can never be split into two equal halves.
        if totalsum % 2:
            return False
        targetsum = totalsum // 2  # integer division so this also works on Python 3

        # mem[r][c] is True if some subset of nums[0..r] sums to exactly c.
        mem = [[False for _ in range(targetsum + 1)] for _ in range(len(nums))]
        for r in range(len(nums)):
            mem[r][0] = True  # the empty subset always sums to 0

        for r in range(len(nums)):
            for c in range(1, targetsum + 1):
                if r == 0:
                    # First row: only nums[0] itself is reachable.
                    mem[r][c] = (nums[r] == c)
                elif nums[r] > c:
                    # nums[r] is too big to use here; carry the previous row down.
                    mem[r][c] = mem[r - 1][c]
                else:
                    # Either skip nums[r], or take it and reach c - nums[r] without it.
                    mem[r][c] = mem[r - 1][c] or mem[r - 1][c - nums[r]]

        return mem[-1][-1]
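A quick sanity check against the two examples above:

>>> Solution().canPartition([1, 5, 11, 5])
True
>>> Solution().canPartition([1, 2, 3, 5])
False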
        

 
