A Detailed Guide to sklearn.metrics.make_scorer

This article explains how to use the make_scorer function from scikit-learn to build custom scorers, covering its parameters and a practical example. It is useful for understanding and tuning the evaluation metrics of machine learning models.


Official documentation: https://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html
Make a scorer from a performance metric or loss function.
This function wraps scoring functions for use with GridSearchCV and cross_val_score. It takes a score function, such as accuracy_score, mean_squared_error, adjusted_rand_score, or average_precision_score, and returns a callable that scores an estimator's output.
It is used as follows:

sklearn.metrics.make_scorer(score_func, greater_is_better=True, needs_proba=False, needs_threshold=False, **kwargs)

(This is the signature in scikit-learn versions before 1.4; from 1.4 onward, needs_proba and needs_threshold are deprecated in favor of a single response_method parameter.)

Its parameters are as follows:

score_func: a callable score function, such as sklearn.metrics.accuracy_score, or a loss function, such as sklearn.metrics.mean_squared_error.
greater_is_better: True (the default) when score_func is a score function; False when it is a loss function. In the latter case the returned scorer negates the output of score_func, since for a loss, smaller values mean a better fit.
needs_proba: whether score_func requires predict_proba to obtain probability estimates from the classifier. If True, then for binary y_true the score function is expected to accept a one-dimensional y_pred (the probability of the positive class); see the sketch after this list.
needs_threshold: whether score_func takes a continuous decision certainty. This only applies to binary classification with estimators that have a decision_function or predict_proba method. If True, then for binary y_true the score function is expected to accept a one-dimensional y_pred (the probability of the positive class, or the decision function output). For example, average precision or the area under the ROC curve cannot be computed from discrete predictions.
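As an illustration of needs_proba, a log-loss scorer must receive probabilities rather than hard labels. Below is a minimal sketch using the signature documented above (scikit-learn < 1.4; newer releases would pass response_method="predict_proba" instead); the dataset is synthetic:

from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import make_scorer, log_loss
from sklearn.model_selection import cross_val_score

X, y = make_classification(n_samples=300, random_state=0)

# log_loss is a loss computed from probabilities, so the scorer both
# requests predict_proba and negates the result
logloss_scorer = make_scorer(log_loss, greater_is_better=False, needs_proba=True)

scores = cross_val_score(LogisticRegression(max_iter=1000), X, y,
                         scoring=logloss_scorer, cv=3)
print(scores)  # negated log-loss per fold (closer to 0 is better)

This is essentially how scikit-learn's built-in "neg_log_loss" scorer is defined.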

The function returns a callable scorer object.
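Calling the returned object directly shows what "callable scorer" means and how the sign flip works. A minimal sketch follows (DummyRegressor and the toy data are only for demonstration):

import numpy as np
from sklearn.dummy import DummyRegressor
from sklearn.metrics import make_scorer, mean_squared_error

X = np.arange(10, dtype=float).reshape(-1, 1)
y = np.arange(10, dtype=float)
reg = DummyRegressor(strategy="mean").fit(X, y)  # always predicts the mean, 4.5

# a scorer takes (estimator, X, y), unlike the metric's (y_true, y_pred)
mse_scorer = make_scorer(mean_squared_error, greater_is_better=False)
print(mean_squared_error(y, reg.predict(X)))  # raw loss: 8.25
print(mse_scorer(reg, X, y))                  # scorer output: -8.25 (negated)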
An example of using the scorer with cross_val_score:

from lightgbm import LGBMRegressor
from sklearn.metrics import make_scorer, mean_squared_error
from sklearn.model_selection import cross_val_score

model = LGBMRegressor(max_depth=5, num_leaves=10, objective="regression")  # X_train / Y_train assumed defined
score_ = cross_val_score(model, X=X_train, y=Y_train, verbose=0, scoring=make_scorer(mean_squared_error))
print(score_)

Output: [0.46157155 0.47102463 0.47506401 0.44817591 0.46550807]
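Because greater_is_better defaults to True, these values are the raw per-fold MSEs, not negated. For model selection (e.g. inside GridSearchCV), pass greater_is_better=False so the scorer negates the loss and the search correctly prefers smaller errors; this is equivalent to the built-in "neg_mean_squared_error" string. A minimal sketch with synthetic data (the hyperparameter grid is illustrative only):

import numpy as np
from lightgbm import LGBMRegressor
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import make_scorer, mean_squared_error

rng = np.random.RandomState(42)
X_train = rng.rand(200, 5)
Y_train = X_train @ rng.rand(5) + 0.1 * rng.randn(200)  # synthetic target

# greater_is_better=False flips the sign, so the search ranks small MSE highest
neg_mse = make_scorer(mean_squared_error, greater_is_better=False)

search = GridSearchCV(
    LGBMRegressor(objective="regression", verbose=-1),
    param_grid={"max_depth": [3, 5], "num_leaves": [10, 31]},
    scoring=neg_mse,  # equivalent to scoring="neg_mean_squared_error"
    cv=3,
)
search.fit(X_train, Y_train)
print(search.best_params_, search.best_score_)  # best_score_ is a negated MSE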
