php中使用split()出现Warning: split(): REG_EMPTY

PHP split()警告
本文解决PHP中使用split()函数拆分字符串时遇到的Warning:split():REG_EMPTY警告问题。通过添加转义符号来正确地使用该函数。

【错误内容】:当PHP中使用split()拆分字符时,出现Warning: split(): REG_EMPTY.

【解决办法】:对正则元字符 | 使用反斜杠转义,即 split('\|', $key);(注:split() 自 PHP 5.3 起已废弃,建议改用 explode('|', $key) 或 preg_split('/\|/', $key))

import pandas as pd
import numpy as np
import xgboost as xgb
import matplotlib.pyplot as plt
import joblib
import warnings
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.impute import SimpleImputer
from collections import Counter  # kept from the original imports (currently unused)

# Silence noisy third-party warnings only; real errors still surface.
warnings.filterwarnings('ignore', category=UserWarning)
warnings.filterwarnings('ignore', category=FutureWarning)

# --- Configuration ---
INPUT_FILE = './data/100-1500pkt_feaures/900_gather_feature_results.csv'
# INPUT_FILE = './data/900_gather_feature_results_augmented.csv'
# Actual classification label column.
APP_CLASS_COLUMN = 'second_id'
# Column used to carve out "unseen application" categories.
APP_SPLIT_COLUMN = 'app_name'
TEST_SIZE_RATIO = 0.2  # reserve 20% of the app_name categories for the test set
MIN_SAMPLES_THRESHOLD = 1  # app_name groups with fewer samples than this are dropped
FEATURE_COLUMNS = [
    'src_port', 'dst_port', 'protocol', 'duration', 'time_limit_duration',
    'packets_count', 'total_packets_count', 'fwd_packets_count', 'bwd_packets_count',
    'total_payload_bytes', 'fwd_total_payload_bytes', 'bwd_total_payload_bytes',
    'payload_bytes_max', 'payload_bytes_min', 'payload_bytes_mean',
    'payload_bytes_variance', 'payload_bytes_skewness', 'payload_bytes_cov',
    'fwd_payload_bytes_max', 'fwd_payload_bytes_min', 'fwd_payload_bytes_mean',
    'fwd_payload_bytes_variance', 'fwd_payload_bytes_skewness', 'fwd_payload_bytes_cov',
    'bwd_payload_bytes_max', 'bwd_payload_bytes_min', 'bwd_payload_bytes_mean',
    'bwd_payload_bytes_variance', 'bwd_payload_bytes_skewness', 'bwd_payload_bytes_cov',
    'bytes_rate', 'fwd_bytes_rate', 'fwd_bytes_rate_in_fwd_packets',
    'bwd_bytes_rate', 'bwd_bytes_rate_in_bwd_packets',
    'packets_rate', 'bwd_packets_rate', 'bwd_packets_rate_in_bwd_packets',
    'fwd_packets_rate', 'fwd_packets_rate_in_fwd_packets', 'down_up_rate',
    'min_packets_delta_time', 'max_packets_delta_time', 'mean_packets_delta_time',
    'variance_packets_delta_time', 'skewness_packets_delta_time', 'cov_packets_delta_time',
    'min_bwd_packets_delta_time', 'max_bwd_packets_delta_time', 'mean_bwd_packets_delta_time',
    'variance_bwd_packets_delta_time', 'skewness_bwd_packets_delta_time', 'cov_bwd_packets_delta_time',
    'min_fwd_packets_delta_time', 'max_fwd_packets_delta_time', 'mean_fwd_packets_delta_time',
    'variance_fwd_packets_delta_time', 'skewness_fwd_packets_delta_time',
    'min_payload_bytes_delta_len', 'max_payload_bytes_delta_len', 'mean_payload_bytes_delta_len',
    'variance_payload_bytes_delta_len', 'skewness_payload_bytes_delta_len', 'cov_payload_bytes_delta_len',
    'min_bwd_payload_bytes_delta_len', 'max_bwd_payload_bytes_delta_len', 'mean_bwd_payload_bytes_delta_len',
    'variance_bwd_payload_bytes_delta_len', 'skewness_bwd_payload_bytes_delta_len', 'cov_bwd_payload_bytes_delta_len',
    'min_fwd_payload_bytes_delta_len', 'max_fwd_payload_bytes_delta_len', 'mean_fwd_payload_bytes_delta_len',
    'variance_fwd_payload_bytes_delta_len', 'skewness_fwd_payload_bytes_delta_len', 'cov_fwd_payload_bytes_delta_len'
]


# --- Core function: sparse-application filtering ---
def filter_sparse_apps(data_df, split_col, class_col, threshold):
    """Drop every app_name whose total sample count is below *threshold*.

    Returns a filtered copy of *data_df* with a fresh RangeIndex and prints
    before/after statistics plus the top-20 (class, app) sample counts.
    """
    print("--- 执行稀疏应用(Sparse App)过滤 ---")
    initial_sample_count = len(data_df)
    initial_app_count = data_df[split_col].nunique()
    initial_class_count = data_df[class_col].nunique()
    # 1. Sample count per app_name.
    app_counts = data_df[split_col].value_counts()
    # 2. app_name values below the threshold.
    sparse_apps = app_counts[app_counts < threshold].index.tolist()
    # 3. Filter them out.
    filtered_df = data_df[~data_df[split_col].isin(sparse_apps)].copy().reset_index(drop=True)
    filtered_app_count = filtered_df[split_col].nunique()
    filtered_class_count = filtered_df[class_col].nunique()
    removed_sample_count = initial_sample_count - len(filtered_df)
    print(f"原始样本总数: {initial_sample_count}")
    print(f"原始 app_name 类别数: {initial_app_count}")
    print(f"原始 {class_col} 类别数: {initial_class_count}")
    print(f"样本数低于 {threshold} 的稀疏 app_name 数量: {len(sparse_apps)}")
    print(f"已移除样本总数: {removed_sample_count}")
    print(f"过滤后样本总数: {len(filtered_df)}")
    print(f"过滤后 app_name 类别数: {filtered_app_count}")
    print(f"过滤后 {class_col} 类别数: {filtered_class_count}")
    # 4. Cross-tabulate class vs. app after filtering (display only).
    print(f"\n过滤后 {class_col} 和 {split_col} 的样本分布 (Top 20 混淆源):")
    cross_tab = filtered_df.groupby([class_col, split_col]).size().reset_index(name='Count')
    print(cross_tab.sort_values(by='Count', ascending=False).head(20).to_markdown(index=False))
    return filtered_df


# --- Core function: data split (by app_name) ---
def split_data_by_app(data_df, split_col, test_ratio, random_state=42):
    """Split by application category so test-set app_name values never appear
    in the training set (the "unseen app" protocol).

    Returns (train_df, test_df, test_apps); both frames get a fresh index.
    """
    print("\n--- 执行未见应用(Unseen App)数据划分 ---")
    # 1. All distinct application names.
    all_apps = data_df[split_col].unique()
    # 2. Split the *names*, not the rows, so categories stay disjoint.
    train_apps, test_apps = train_test_split(
        all_apps, test_size=test_ratio, random_state=random_state, shuffle=True
    )
    print(f"总应用类别数 ({split_col}): {len(all_apps)}")
    print(f"训练集应用类别数: {len(train_apps)}")
    print(f"测试集应用类别数: {len(test_apps)}")
    # 3. Materialize the row split from the name split.
    train_df = data_df[data_df[split_col].isin(train_apps)].copy().reset_index(drop=True)
    test_df = data_df[data_df[split_col].isin(test_apps)].copy().reset_index(drop=True)
    print(f"训练集样本总数: {len(train_df)}")
    print(f"测试集样本总数: {len(test_df)}")
    return train_df, test_df, test_apps


# --- Core function: preprocessing and feature engineering ---
def preprocess_data(df, feature_cols, label_col, label_encoder=None, scaler=None,
                    imputer=None, is_train=True):
    """Encode the protocol column, replace inf/NaN, cap outliers, standardize,
    and label-encode.  When is_train is False the fitted imputer/scaler/
    label_encoder passed in are reused instead of refit.

    Returns (X, y_encoded, label_encoder, scaler, imputer, y_original).
    NOTE(review): outlier capping uses each split's own quantiles, so the test
    set is capped with test-set statistics — confirm this is intentional.
    """
    X = df[feature_cols].copy()
    y = df[label_col].copy()
    print(f"\n处理 {('训练' if is_train else '测试')} 集数据...")
    # 1. Factorize protocol (must happen before missing-value handling).
    X['protocol'] = pd.factorize(X['protocol'])[0]
    # 2. Treat +/-inf as missing.
    X = X.replace([np.inf, -np.inf], np.nan)
    # 3. Median imputation; the test set reuses the training-set imputer.
    if is_train:
        imputer = SimpleImputer(strategy='median')
        X_imputed = imputer.fit_transform(X)
    else:
        X_imputed = imputer.transform(X)
    X = pd.DataFrame(X_imputed, columns=X.columns)

    # 4. Cap numeric outliers at the 1st/99th percentile.
    def cap_outliers(df_in, columns):
        df_out = df_in.copy()
        for col in columns:
            if df_out[col].dtype in ['float64', 'int64'] and df_out[col].nunique() > 1:
                upper_bound = df_out[col].quantile(0.99)
                lower_bound = df_out[col].quantile(0.01)
                df_out[col] = np.where(df_out[col] > upper_bound, upper_bound,
                                       np.where(df_out[col] < lower_bound, lower_bound,
                                                df_out[col]))
        return df_out

    numeric_cols = X.select_dtypes(include=['float64', 'int64']).columns
    X = cap_outliers(X, numeric_cols)
    # 5. Standardize; the test set reuses the training-set scaler.
    if is_train:
        scaler = StandardScaler()
        X_scaled = scaler.fit_transform(X)
    else:
        X_scaled = scaler.transform(X)
    X = pd.DataFrame(X_scaled, columns=X.columns)
    # 6. Label encoding.
    if is_train:
        label_encoder = LabelEncoder()
        y_encoded = label_encoder.fit_transform(y)
    else:
        # In the unseen-app protocol, test-set second_id values are normally
        # still known to the training set (app_name -> second_id is many-to-one),
        # so we transform with the training-fitted encoder.
        try:
            y_encoded = label_encoder.transform(y)
        except ValueError:
            # A second_id unseen during training slipped through: mark it -1
            # and drop those rows, since the model cannot predict them.
            print(f"警告:测试集包含训练集未见的 {label_col} 类别,使用 -1 标记。")
            y_encoded = y.apply(
                lambda x: label_encoder.transform([x])[0]
                if x in label_encoder.classes_ else -1
            ).values
            valid_indices = y_encoded != -1
            # BUGFIX: count removals from the mask *before* filtering; the
            # original compared lengths after filtering, always printing 0.
            n_removed = int((~valid_indices).sum())
            X = X[valid_indices].copy().reset_index(drop=True)
            y_encoded = y_encoded[valid_indices]
            y = y[valid_indices].copy().reset_index(drop=True)
            print(f"移除 {n_removed} 个在训练集中未见的 {label_col} 样本。")
    # Preprocessed features, encoded labels, fitted tools, and original labels.
    return X, y_encoded, label_encoder, scaler, imputer, y


# --- Core function: detailed report and confusion analysis ---
def generate_detailed_report(X_test_df, y_test_original, y_pred_encoded,
                             label_encoder, test_df_full):
    """Print a classification report, plot/save the confusion matrix, and list
    the worst-confused second_id pairs together with the app_name values
    involved in each confusion.
    """
    # 1. Back to original label space.
    y_pred_original = label_encoder.inverse_transform(y_pred_encoded)
    # 2. Classification report.
    print("\n================ 分类报告 ================")
    print(f"基于分类标签:{APP_CLASS_COLUMN}")
    report = classification_report(y_test_original, y_pred_original, digits=4)
    print(report)
    # 3. Confusion matrix, displayed only for classes present in the test set.
    all_classes = label_encoder.classes_
    test_classes = y_test_original.unique()
    cm = confusion_matrix(y_test_original, y_pred_original, labels=all_classes)
    cm_df = pd.DataFrame(cm, index=all_classes, columns=all_classes)
    cm_df_display = cm_df.loc[test_classes, test_classes]
    print(f"\n================ 混淆矩阵 ({APP_CLASS_COLUMN} 类别) ================")
    plt.figure(figsize=(12, 10))
    sns.heatmap(cm_df_display, annot=True, fmt='d', cmap='Blues', linewidths=.5,
                cbar=False, annot_kws={"size": 8})
    plt.title(f'Confusion Matrix (Test Classes: {APP_CLASS_COLUMN})', fontsize=16)
    plt.xlabel('Predicted Label', fontsize=12)
    plt.ylabel('True Label', fontsize=12)
    plt.xticks(rotation=45, ha='right')
    plt.yticks(rotation=0)
    plt.tight_layout()
    plt.savefig('confusion_matrix.png')
    plt.show()
    print("混淆矩阵图已保存为 confusion_matrix.png")
    # 4. Which second_id pairs are confused, and which app_name values caused it.
    print("\n================ 详细混淆类别分析 ================")
    # Attach predictions to the raw test rows for traceback analysis.
    # NOTE(review): relies on test_df_full and y_test_original sharing an
    # aligned RangeIndex — confirm if rows were dropped during preprocessing.
    test_analysis_df = test_df_full.copy()
    test_analysis_df['y_true'] = y_test_original
    test_analysis_df['y_pred'] = y_pred_original
    misclassified_df = test_analysis_df[test_analysis_df['y_true'] != test_analysis_df['y_pred']]
    if misclassified_df.empty:
        print("测试集中所有样本均被正确分类,没有混淆。")
        return
    # Count each (true -> predicted) confusion pair.
    confusion_counts = misclassified_df.groupby(['y_true', 'y_pred']).size().reset_index(name='Count')
    confusion_counts = confusion_counts.sort_values(by='Count', ascending=False)
    # Per-true-class totals for the misclassification rate.
    true_class_counts = test_analysis_df['y_true'].value_counts()
    print(f"**混淆最严重的 TOP 10 类别对(真实 {APP_CLASS_COLUMN} -> 预测 {APP_CLASS_COLUMN}):**")
    print("-" * 80)
    rank = 1
    for index, row in confusion_counts.head(10).iterrows():
        true_id = row['y_true']
        pred_id = row['y_pred']
        count = row['Count']
        # app_name values implicated in this particular confusion pair.
        involved_apps = misclassified_df[
            (misclassified_df['y_true'] == true_id) &
            (misclassified_df['y_pred'] == pred_id)
        ][APP_SPLIT_COLUMN].unique()
        # All app_name values mapped to this true class (context).
        all_true_apps = test_analysis_df[
            test_analysis_df['y_true'] == true_id
        ][APP_SPLIT_COLUMN].unique()
        total_true_count = true_class_counts.get(true_id, 0)
        misclass_rate = (count / total_true_count) if total_true_count > 0 else 0
        print(f"排名 {rank}: {true_id} -> {pred_id} (混淆次数: {count}, 占真实类别 {true_id} 样本的 {misclass_rate:.2%})")
        print(f" - 真实类别 {true_id} 总样本: {total_true_count} ")
        print(f" - 涉及的 {APP_SPLIT_COLUMN} (导致混淆): {', '.join(involved_apps)}")
        print(f" - {true_id} 对应的所有 {APP_SPLIT_COLUMN} (上下文): {', '.join(all_true_apps)}")
        print("-" * 80)
        rank += 1


# --- Main pipeline ---
def run_classification_pipeline():
    """End-to-end run: load -> filter sparse apps -> unseen-app split ->
    preprocess -> train XGBoost -> report -> feature importance -> persist."""
    # 1. Load only the columns we need: features + class label + split label.
    print("读取数据...")
    required_cols = FEATURE_COLUMNS + [APP_CLASS_COLUMN, APP_SPLIT_COLUMN]
    data = pd.read_csv(INPUT_FILE, usecols=required_cols)
    # 2. Remove app_name groups below MIN_SAMPLES_THRESHOLD.
    filtered_data = filter_sparse_apps(data, APP_SPLIT_COLUMN, APP_CLASS_COLUMN,
                                       MIN_SAMPLES_THRESHOLD)
    # 3. Unseen-app split (test_df_full keeps every original column for analysis).
    train_df, test_df_full, _ = split_data_by_app(filtered_data, APP_SPLIT_COLUMN,
                                                  TEST_SIZE_RATIO)
    # 4. Preprocess the training set (fits imputer/scaler/encoder).
    X_train, y_train_encoded, label_encoder, scaler, imputer, _ = preprocess_data(
        train_df, FEATURE_COLUMNS, APP_CLASS_COLUMN, is_train=True
    )
    num_classes = len(label_encoder.classes_)
    print(f"\n训练集检测到 {num_classes} 个分类标签 ({APP_CLASS_COLUMN})。")
    # 5. Preprocess the test set with the training-fitted tools.
    X_test, y_test_encoded, _, _, _, y_test_original = preprocess_data(
        test_df_full, FEATURE_COLUMNS, APP_CLASS_COLUMN,
        label_encoder, scaler, imputer, is_train=False
    )
    # Features and labels must stay aligned after preprocessing.
    if len(X_test) != len(y_test_encoded):
        print("警告:测试集样本数量在预处理后发生变化,请检查预处理步骤。")
        return
    # 6. Build and train the XGBoost model.
    print("\n创建XGBoost模型...")
    model = xgb.XGBClassifier(
        objective='multi:softprob',
        num_class=num_classes,
        n_estimators=500,
        max_depth=7,
        learning_rate=0.03,
        subsample=0.8,
        colsample_bytree=0.8,
        gamma=0.1,
        reg_alpha=0.1,
        reg_lambda=1,
        use_label_encoder=False,
        eval_metric='mlogloss',
        tree_method='hist',
        random_state=42,
        n_jobs=-1,
    )
    print("开始训练模型...")
    model.fit(
        X_train, y_train_encoded,
        eval_set=[(X_train, y_train_encoded)],  # evaluation on the training set only
        verbose=10,
    )
    # 7. Predict on the test set.
    print("\n进行预测...")
    y_pred_encoded = model.predict(X_test)
    # 8. Detailed report and confusion analysis.
    generate_detailed_report(X_test, y_test_original, y_pred_encoded,
                             label_encoder, test_df_full)
    # 9. Feature importance (training-set features).
    print("\n================ 特征重要性分析 ================")
    importance = model.feature_importances_
    sorted_idx = importance.argsort()[::-1]
    print("\nTop 20特征重要性:")
    feature_names = X_train.columns
    for i in sorted_idx[:20]:
        print(f"{feature_names[i]:<40}: {importance[i]:.4f}")
    plt.figure(figsize=(10, 12))
    plt.barh(range(20), importance[sorted_idx][:20], align='center')
    plt.yticks(range(20), np.array(feature_names)[sorted_idx][:20])
    plt.xlabel("特征重要性")
    plt.title("Top 20 特征重要性")
    plt.gca().invert_yaxis()
    plt.tight_layout()
    plt.savefig('feature_importance.png')
    plt.show()
    # 10. Persist the preprocessing pipeline and the model.
    print("\n保存模型和预处理工具...")
    preprocessing_pipeline = {
        'imputer': imputer,
        'scaler': scaler,
        'label_encoder': label_encoder,
        'feature_columns': FEATURE_COLUMNS,
        'app_class_column': APP_CLASS_COLUMN,
    }
    joblib.dump(preprocessing_pipeline, 'preprocessing_pipeline.pkl')
    model.save_model('xgboost_model_unseen_app_v2.json')
    print("所有处理完成!")


if __name__ == '__main__':
    run_classification_pipeline()
11-07
#include "BackgroundSubtractorSuBSENSE.h" #include "DistanceUtils.h" #include "RandUtils.h" #include <iostream> #include <opencv2/imgproc/imgproc.hpp> #include <opencv2/highgui/highgui.hpp> #include <iomanip> /* * * Intrinsic parameters for our method are defined here; tuning these for better * performance should not be required in most cases -- although improvements in * very specific scenarios are always possible. * */ //! defines the threshold value(s) used to detect long-term ghosting and trigger the fast edge-based absorption heuristic #define GHOSTDET_D_MAX (0.010f) // defines 'negligible' change here #define GHOSTDET_S_MIN (0.995f) // defines the required minimum local foreground saturation value //! parameter used to scale dynamic distance threshold adjustments ('R(x)') #define FEEDBACK_R_VAR (0.01f) //! parameters used to adjust the variation step size of 'v(x)' #define FEEDBACK_V_INCR (1.000f) #define FEEDBACK_V_DECR (0.100f) //! parameters used to scale dynamic learning rate adjustments ('T(x)') #define FEEDBACK_T_DECR (0.2500f) #define FEEDBACK_T_INCR (0.5000f) #define FEEDBACK_T_LOWER (2.0000f) #define FEEDBACK_T_UPPER (256.00f) //! parameters used to define 'unstable' regions, based on segm noise/bg dynamics and local dist threshold values #define UNSTABLE_REG_RATIO_MIN (0.100f) #define UNSTABLE_REG_RDIST_MIN (3.000f) //! parameters used to scale the relative LBSP intensity threshold used for internal comparisons #define LBSPDESC_NONZERO_RATIO_MIN (0.100f) #define LBSPDESC_NONZERO_RATIO_MAX (0.500f) //! 
parameters used to define model reset/learning rate boosts in our frame-level component #define FRAMELEVEL_MIN_COLOR_DIFF_THRESHOLD (m_nMinColorDistThreshold/2) #define FRAMELEVEL_ANALYSIS_DOWNSAMPLE_RATIO (8) // local define used to display debug information #define DISPLAY_SUBSENSE_DEBUG_INFO 0 // local define used to specify the default frame size (320x240 = QVGA) #define DEFAULT_FRAME_SIZE cv::Size(320,240) // local define used to specify the color dist threshold offset used for unstable regions #define STAB_COLOR_DIST_OFFSET (m_nMinColorDistThreshold/5) // local define used to specify the desc dist threshold offset used for unstable regions #define UNSTAB_DESC_DIST_OFFSET (m_nDescDistThresholdOffset) static const size_t s_nColorMaxDataRange_1ch = UCHAR_MAX; static const size_t s_nDescMaxDataRange_1ch = LBSP::DESC_SIZE*8; static const size_t s_nColorMaxDataRange_3ch = s_nColorMaxDataRange_1ch*3; static const size_t s_nDescMaxDataRange_3ch = s_nDescMaxDataRange_1ch*3; BackgroundSubtractorSuBSENSE::BackgroundSubtractorSuBSENSE( float fRelLBSPThreshold ,size_t nDescDistThresholdOffset ,size_t nMinColorDistThreshold ,size_t nBGSamples ,size_t nRequiredBGSamples ,size_t nSamplesForMovingAvgs) : BackgroundSubtractorLBSP(fRelLBSPThreshold) ,m_nMinColorDistThreshold(nMinColorDistThreshold) ,m_nDescDistThresholdOffset(nDescDistThresholdOffset) ,m_nBGSamples(nBGSamples) ,m_nRequiredBGSamples(nRequiredBGSamples) ,m_nSamplesForMovingAvgs(nSamplesForMovingAvgs) ,m_fLastNonZeroDescRatio(0.0f) ,m_bLearningRateScalingEnabled(true) ,m_fCurrLearningRateLowerCap(FEEDBACK_T_LOWER) ,m_fCurrLearningRateUpperCap(FEEDBACK_T_UPPER) ,m_nMedianBlurKernelSize(m_nDefaultMedianBlurKernelSize) ,m_bUse3x3Spread(true) ,m_defaultMorphologyKernel(cv::getStructuringElement(cv::MORPH_RECT, cv::Size(3, 3))) { CV_Assert(m_nBGSamples>0 && m_nRequiredBGSamples<=m_nBGSamples); CV_Assert(m_nMinColorDistThreshold>=STAB_COLOR_DIST_OFFSET); } BackgroundSubtractorSuBSENSE::~BackgroundSubtractorSuBSENSE() { 
if(m_aPxIdxLUT) delete[] m_aPxIdxLUT; if(m_aPxInfoLUT) delete[] m_aPxInfoLUT; } void BackgroundSubtractorSuBSENSE::initialize(const cv::Mat& oInitImg, const cv::Mat& oROI) { // == init CV_Assert(!oInitImg.empty() && oInitImg.cols>0 && oInitImg.rows>0); CV_Assert(oInitImg.isContinuous()); CV_Assert(oInitImg.type()==CV_8UC3 || oInitImg.type()==CV_8UC1); if(oInitImg.type()==CV_8UC3) { std::vector<cv::Mat> voInitImgChannels; cv::split(oInitImg,voInitImgChannels); if(!cv::countNonZero((voInitImgChannels[0]!=voInitImgChannels[1])|(voInitImgChannels[2]!=voInitImgChannels[1]))) std::cout << std::endl << "\tBackgroundSubtractorSuBSENSE : Warning, grayscale images should always be passed in CV_8UC1 format for optimal performance." << std::endl; } cv::Mat oNewBGROI; if(oROI.empty() && (m_oROI.empty() || oROI.size()!=oInitImg.size())) { oNewBGROI.create(oInitImg.size(),CV_8UC1); oNewBGROI = cv::Scalar_<uchar>(UCHAR_MAX); } else if(oROI.empty()) oNewBGROI = m_oROI; else { CV_Assert(oROI.size()==oInitImg.size() && oROI.type()==CV_8UC1); CV_Assert(cv::countNonZero((oROI<UCHAR_MAX)&(oROI>0))==0); oNewBGROI = oROI.clone(); cv::Mat oTempROI; cv::dilate(oNewBGROI,oTempROI,m_defaultMorphologyKernel,cv::Point(-1,-1),LBSP::PATCH_SIZE/2); cv::bitwise_or(oNewBGROI,oTempROI/2,oNewBGROI); } const size_t nOrigROIPxCount = (size_t)cv::countNonZero(oNewBGROI); CV_Assert(nOrigROIPxCount>0); LBSP::validateROI(oNewBGROI); const size_t nFinalROIPxCount = (size_t)cv::countNonZero(oNewBGROI); CV_Assert(nFinalROIPxCount>0); m_oROI = oNewBGROI; m_oImgSize = oInitImg.size(); m_nImgType = oInitImg.type(); m_nImgChannels = oInitImg.channels(); m_nTotPxCount = m_oImgSize.area(); m_nTotRelevantPxCount = nFinalROIPxCount; m_nFrameIndex = 0; m_nFramesSinceLastReset = 0; m_nModelResetCooldown = 0; m_fLastNonZeroDescRatio = 0.0f; const int nTotImgPixels = m_oImgSize.height*m_oImgSize.width; if(nOrigROIPxCount>=m_nTotPxCount/2 && (int)m_nTotPxCount>=DEFAULT_FRAME_SIZE.area()) { m_bLearningRateScalingEnabled = 
true; m_bAutoModelResetEnabled = true; m_bUse3x3Spread = !(nTotImgPixels>DEFAULT_FRAME_SIZE.area()*2); const int nRawMedianBlurKernelSize = std::min((int)floor((float)nTotImgPixels/DEFAULT_FRAME_SIZE.area()+0.5f)+m_nDefaultMedianBlurKernelSize,14); m_nMedianBlurKernelSize = (nRawMedianBlurKernelSize%2)?nRawMedianBlurKernelSize:nRawMedianBlurKernelSize-1; m_fCurrLearningRateLowerCap = FEEDBACK_T_LOWER; m_fCurrLearningRateUpperCap = FEEDBACK_T_UPPER; } else { m_bLearningRateScalingEnabled = false; m_bAutoModelResetEnabled = false; m_bUse3x3Spread = true; m_nMedianBlurKernelSize = m_nDefaultMedianBlurKernelSize; m_fCurrLearningRateLowerCap = FEEDBACK_T_LOWER*2; m_fCurrLearningRateUpperCap = FEEDBACK_T_UPPER*2; } m_oUpdateRateFrame.create(m_oImgSize,CV_32FC1); m_oUpdateRateFrame = cv::Scalar(m_fCurrLearningRateLowerCap); m_oDistThresholdFrame.create(m_oImgSize,CV_32FC1); m_oDistThresholdFrame = cv::Scalar(1.0f); m_oVariationModulatorFrame.create(m_oImgSize,CV_32FC1); m_oVariationModulatorFrame = cv::Scalar(10.0f); // should always be >= FEEDBACK_V_DECR m_oMeanLastDistFrame.create(m_oImgSize,CV_32FC1); m_oMeanLastDistFrame = cv::Scalar(0.0f); m_oMeanMinDistFrame_LT.create(m_oImgSize,CV_32FC1); m_oMeanMinDistFrame_LT = cv::Scalar(0.0f); m_oMeanMinDistFrame_ST.create(m_oImgSize,CV_32FC1); m_oMeanMinDistFrame_ST = cv::Scalar(0.0f); m_oDownSampledFrameSize = cv::Size(m_oImgSize.width/FRAMELEVEL_ANALYSIS_DOWNSAMPLE_RATIO,m_oImgSize.height/FRAMELEVEL_ANALYSIS_DOWNSAMPLE_RATIO); m_oMeanDownSampledLastDistFrame_LT.create(m_oDownSampledFrameSize,CV_32FC((int)m_nImgChannels)); m_oMeanDownSampledLastDistFrame_LT = cv::Scalar(0.0f); m_oMeanDownSampledLastDistFrame_ST.create(m_oDownSampledFrameSize,CV_32FC((int)m_nImgChannels)); m_oMeanDownSampledLastDistFrame_ST = cv::Scalar(0.0f); m_oMeanRawSegmResFrame_LT.create(m_oImgSize,CV_32FC1); m_oMeanRawSegmResFrame_LT = cv::Scalar(0.0f); m_oMeanRawSegmResFrame_ST.create(m_oImgSize,CV_32FC1); m_oMeanRawSegmResFrame_ST = cv::Scalar(0.0f); 
m_oMeanFinalSegmResFrame_LT.create(m_oImgSize,CV_32FC1); m_oMeanFinalSegmResFrame_LT = cv::Scalar(0.0f); m_oMeanFinalSegmResFrame_ST.create(m_oImgSize,CV_32FC1); m_oMeanFinalSegmResFrame_ST = cv::Scalar(0.0f); m_oUnstableRegionMask.create(m_oImgSize,CV_8UC1); m_oUnstableRegionMask = cv::Scalar_<uchar>(0); m_oBlinksFrame.create(m_oImgSize,CV_8UC1); m_oBlinksFrame = cv::Scalar_<uchar>(0); m_oDownSampledFrame_MotionAnalysis.create(m_oDownSampledFrameSize,CV_8UC((int)m_nImgChannels)); m_oDownSampledFrame_MotionAnalysis = cv::Scalar_<uchar>::all(0); m_oLastColorFrame.create(m_oImgSize,CV_8UC((int)m_nImgChannels)); m_oLastColorFrame = cv::Scalar_<uchar>::all(0); m_oLastDescFrame.create(m_oImgSize,CV_16UC((int)m_nImgChannels)); m_oLastDescFrame = cv::Scalar_<ushort>::all(0); m_oLastRawFGMask.create(m_oImgSize,CV_8UC1); m_oLastRawFGMask = cv::Scalar_<uchar>(0); m_oLastFGMask.create(m_oImgSize,CV_8UC1); m_oLastFGMask = cv::Scalar_<uchar>(0); m_oLastFGMask_dilated.create(m_oImgSize,CV_8UC1); m_oLastFGMask_dilated = cv::Scalar_<uchar>(0); m_oLastFGMask_dilated_inverted.create(m_oImgSize,CV_8UC1); m_oLastFGMask_dilated_inverted = cv::Scalar_<uchar>(0); m_oFGMask_FloodedHoles.create(m_oImgSize,CV_8UC1); m_oFGMask_FloodedHoles = cv::Scalar_<uchar>(0); m_oFGMask_PreFlood.create(m_oImgSize,CV_8UC1); m_oFGMask_PreFlood = cv::Scalar_<uchar>(0); m_oCurrRawFGBlinkMask.create(m_oImgSize,CV_8UC1); m_oCurrRawFGBlinkMask = cv::Scalar_<uchar>(0); m_oLastRawFGBlinkMask.create(m_oImgSize,CV_8UC1); m_oLastRawFGBlinkMask = cv::Scalar_<uchar>(0); m_voBGColorSamples.resize(m_nBGSamples); m_voBGDescSamples.resize(m_nBGSamples); for(size_t s=0; s<m_nBGSamples; ++s) { m_voBGColorSamples[s].create(m_oImgSize,CV_8UC((int)m_nImgChannels)); m_voBGColorSamples[s] = cv::Scalar_<uchar>::all(0); m_voBGDescSamples[s].create(m_oImgSize,CV_16UC((int)m_nImgChannels)); m_voBGDescSamples[s] = cv::Scalar_<ushort>::all(0); } if(m_aPxIdxLUT) delete[] m_aPxIdxLUT; if(m_aPxInfoLUT) delete[] m_aPxInfoLUT; m_aPxIdxLUT 
= new size_t[m_nTotRelevantPxCount]; m_aPxInfoLUT = new PxInfoBase[m_nTotPxCount]; if(m_nImgChannels==1) { CV_Assert(m_oLastColorFrame.step.p[0]==(size_t)m_oImgSize.width && m_oLastColorFrame.step.p[1]==1); CV_Assert(m_oLastDescFrame.step.p[0]==m_oLastColorFrame.step.p[0]*2 && m_oLastDescFrame.step.p[1]==m_oLastColorFrame.step.p[1]*2); for(size_t t=0; t<=UCHAR_MAX; ++t) m_anLBSPThreshold_8bitLUT[t] = cv::saturate_cast<uchar>((m_nLBSPThresholdOffset+t*m_fRelLBSPThreshold)/3); for(size_t nPxIter=0, nModelIter=0; nPxIter<m_nTotPxCount; ++nPxIter) { if(m_oROI.data[nPxIter]) { m_aPxIdxLUT[nModelIter] = nPxIter; m_aPxInfoLUT[nPxIter].nImgCoord_Y = (int)nPxIter/m_oImgSize.width; m_aPxInfoLUT[nPxIter].nImgCoord_X = (int)nPxIter%m_oImgSize.width; m_aPxInfoLUT[nPxIter].nModelIdx = nModelIter; m_oLastColorFrame.data[nPxIter] = oInitImg.data[nPxIter]; const size_t nDescIter = nPxIter*2; LBSP::computeGrayscaleDescriptor(oInitImg,oInitImg.data[nPxIter],m_aPxInfoLUT[nPxIter].nImgCoord_X,m_aPxInfoLUT[nPxIter].nImgCoord_Y,m_anLBSPThreshold_8bitLUT[oInitImg.data[nPxIter]],*((ushort*)(m_oLastDescFrame.data+nDescIter))); ++nModelIter; } } } else { //m_nImgChannels==3 CV_Assert(m_oLastColorFrame.step.p[0]==(size_t)m_oImgSize.width*3 && m_oLastColorFrame.step.p[1]==3); CV_Assert(m_oLastDescFrame.step.p[0]==m_oLastColorFrame.step.p[0]*2 && m_oLastDescFrame.step.p[1]==m_oLastColorFrame.step.p[1]*2); for(size_t t=0; t<=UCHAR_MAX; ++t) m_anLBSPThreshold_8bitLUT[t] = cv::saturate_cast<uchar>(m_nLBSPThresholdOffset+t*m_fRelLBSPThreshold); for(size_t nPxIter=0, nModelIter=0; nPxIter<m_nTotPxCount; ++nPxIter) { if(m_oROI.data[nPxIter]) { m_aPxIdxLUT[nModelIter] = nPxIter; m_aPxInfoLUT[nPxIter].nImgCoord_Y = (int)nPxIter/m_oImgSize.width; m_aPxInfoLUT[nPxIter].nImgCoord_X = (int)nPxIter%m_oImgSize.width; m_aPxInfoLUT[nPxIter].nModelIdx = nModelIter; const size_t nPxRGBIter = nPxIter*3; const size_t nDescRGBIter = nPxRGBIter*2; for(size_t c=0; c<3; ++c) { m_oLastColorFrame.data[nPxRGBIter+c] = 
oInitImg.data[nPxRGBIter+c]; LBSP::computeSingleRGBDescriptor(oInitImg,oInitImg.data[nPxRGBIter+c],m_aPxInfoLUT[nPxIter].nImgCoord_X,m_aPxInfoLUT[nPxIter].nImgCoord_Y,c,m_anLBSPThreshold_8bitLUT[oInitImg.data[nPxRGBIter+c]],((ushort*)(m_oLastDescFrame.data+nDescRGBIter))[c]); } ++nModelIter; } } } m_bInitialized = true; refreshModel(1.0f); } void BackgroundSubtractorSuBSENSE::refreshModel(float fSamplesRefreshFrac, bool bForceFGUpdate) { // == refresh CV_Assert(m_bInitialized); CV_Assert(fSamplesRefreshFrac>0.0f && fSamplesRefreshFrac<=1.0f); const size_t nModelsToRefresh = fSamplesRefreshFrac<1.0f?(size_t)(fSamplesRefreshFrac*m_nBGSamples):m_nBGSamples; const size_t nRefreshStartPos = fSamplesRefreshFrac<1.0f?rand()%m_nBGSamples:0; if(m_nImgChannels==1) { for(size_t nModelIter=0; nModelIter<m_nTotRelevantPxCount; ++nModelIter) { const size_t nPxIter = m_aPxIdxLUT[nModelIter]; if(bForceFGUpdate || !m_oLastFGMask.data[nPxIter]) { for(size_t nCurrModelIdx=nRefreshStartPos; nCurrModelIdx<nRefreshStartPos+nModelsToRefresh; ++nCurrModelIdx) { int nSampleImgCoord_Y, nSampleImgCoord_X; getRandSamplePosition(nSampleImgCoord_X,nSampleImgCoord_Y,m_aPxInfoLUT[nPxIter].nImgCoord_X,m_aPxInfoLUT[nPxIter].nImgCoord_Y,LBSP::PATCH_SIZE/2,m_oImgSize); const size_t nSamplePxIdx = m_oImgSize.width*nSampleImgCoord_Y + nSampleImgCoord_X; if(bForceFGUpdate || !m_oLastFGMask.data[nSamplePxIdx]) { const size_t nCurrRealModelIdx = nCurrModelIdx%m_nBGSamples; m_voBGColorSamples[nCurrRealModelIdx].data[nPxIter] = m_oLastColorFrame.data[nSamplePxIdx]; *((ushort*)(m_voBGDescSamples[nCurrRealModelIdx].data+nPxIter*2)) = *((ushort*)(m_oLastDescFrame.data+nSamplePxIdx*2)); } } } } } else { //m_nImgChannels==3 for(size_t nModelIter=0; nModelIter<m_nTotRelevantPxCount; ++nModelIter) { const size_t nPxIter = m_aPxIdxLUT[nModelIter]; if(bForceFGUpdate || !m_oLastFGMask.data[nPxIter]) { for(size_t nCurrModelIdx=nRefreshStartPos; nCurrModelIdx<nRefreshStartPos+nModelsToRefresh; ++nCurrModelIdx) { int 
nSampleImgCoord_Y, nSampleImgCoord_X; getRandSamplePosition(nSampleImgCoord_X,nSampleImgCoord_Y,m_aPxInfoLUT[nPxIter].nImgCoord_X,m_aPxInfoLUT[nPxIter].nImgCoord_Y,LBSP::PATCH_SIZE/2,m_oImgSize); const size_t nSamplePxIdx = m_oImgSize.width*nSampleImgCoord_Y + nSampleImgCoord_X; if(bForceFGUpdate || !m_oLastFGMask.data[nSamplePxIdx]) { const size_t nCurrRealModelIdx = nCurrModelIdx%m_nBGSamples; for(size_t c=0; c<3; ++c) { m_voBGColorSamples[nCurrRealModelIdx].data[nPxIter*3+c] = m_oLastColorFrame.data[nSamplePxIdx*3+c]; *((ushort*)(m_voBGDescSamples[nCurrRealModelIdx].data+(nPxIter*3+c)*2)) = *((ushort*)(m_oLastDescFrame.data+(nSamplePxIdx*3+c)*2)); } } } } } } } void BackgroundSubtractorSuBSENSE::operator()(cv::InputArray _image, cv::OutputArray _fgmask, double learningRateOverride) { // == process CV_Assert(m_bInitialized); cv::Mat oInputImg = _image.getMat(); CV_Assert(oInputImg.type()==m_nImgType && oInputImg.size()==m_oImgSize); CV_Assert(oInputImg.isContinuous()); _fgmask.create(m_oImgSize,CV_8UC1); cv::Mat oCurrFGMask = _fgmask.getMat(); memset(oCurrFGMask.data,0,oCurrFGMask.cols*oCurrFGMask.rows); size_t nNonZeroDescCount = 0; const float fRollAvgFactor_LT = 1.0f/std::min(++m_nFrameIndex,m_nSamplesForMovingAvgs); const float fRollAvgFactor_ST = 1.0f/std::min(m_nFrameIndex,m_nSamplesForMovingAvgs/4); if(m_nImgChannels==1) { for(size_t nModelIter=0; nModelIter<m_nTotRelevantPxCount; ++nModelIter) { const size_t nPxIter = m_aPxIdxLUT[nModelIter]; const size_t nDescIter = nPxIter*2; const size_t nFloatIter = nPxIter*4; const int nCurrImgCoord_X = m_aPxInfoLUT[nPxIter].nImgCoord_X; const int nCurrImgCoord_Y = m_aPxInfoLUT[nPxIter].nImgCoord_Y; const uchar nCurrColor = oInputImg.data[nPxIter]; size_t nMinDescDist = s_nDescMaxDataRange_1ch; size_t nMinSumDist = s_nColorMaxDataRange_1ch; float* pfCurrDistThresholdFactor = (float*)(m_oDistThresholdFrame.data+nFloatIter); float* pfCurrVariationFactor = (float*)(m_oVariationModulatorFrame.data+nFloatIter); float* 
pfCurrLearningRate = ((float*)(m_oUpdateRateFrame.data+nFloatIter)); float* pfCurrMeanLastDist = ((float*)(m_oMeanLastDistFrame.data+nFloatIter)); float* pfCurrMeanMinDist_LT = ((float*)(m_oMeanMinDistFrame_LT.data+nFloatIter)); float* pfCurrMeanMinDist_ST = ((float*)(m_oMeanMinDistFrame_ST.data+nFloatIter)); float* pfCurrMeanRawSegmRes_LT = ((float*)(m_oMeanRawSegmResFrame_LT.data+nFloatIter)); float* pfCurrMeanRawSegmRes_ST = ((float*)(m_oMeanRawSegmResFrame_ST.data+nFloatIter)); float* pfCurrMeanFinalSegmRes_LT = ((float*)(m_oMeanFinalSegmResFrame_LT.data+nFloatIter)); float* pfCurrMeanFinalSegmRes_ST = ((float*)(m_oMeanFinalSegmResFrame_ST.data+nFloatIter)); ushort& nLastIntraDesc = *((ushort*)(m_oLastDescFrame.data+nDescIter)); uchar& nLastColor = m_oLastColorFrame.data[nPxIter]; const size_t nCurrColorDistThreshold = (size_t)(((*pfCurrDistThresholdFactor)*m_nMinColorDistThreshold)-((!m_oUnstableRegionMask.data[nPxIter])*STAB_COLOR_DIST_OFFSET))/2; const size_t nCurrDescDistThreshold = ((size_t)1<<((size_t)floor(*pfCurrDistThresholdFactor+0.5f)))+m_nDescDistThresholdOffset+(m_oUnstableRegionMask.data[nPxIter]*UNSTAB_DESC_DIST_OFFSET); ushort nCurrInterDesc, nCurrIntraDesc; LBSP::computeGrayscaleDescriptor(oInputImg,nCurrColor,nCurrImgCoord_X,nCurrImgCoord_Y,m_anLBSPThreshold_8bitLUT[nCurrColor],nCurrIntraDesc); m_oUnstableRegionMask.data[nPxIter] = ((*pfCurrDistThresholdFactor)>UNSTABLE_REG_RDIST_MIN || (*pfCurrMeanRawSegmRes_LT-*pfCurrMeanFinalSegmRes_LT)>UNSTABLE_REG_RATIO_MIN || (*pfCurrMeanRawSegmRes_ST-*pfCurrMeanFinalSegmRes_ST)>UNSTABLE_REG_RATIO_MIN)?1:0; size_t nGoodSamplesCount=0, nSampleIdx=0; while(nGoodSamplesCount<m_nRequiredBGSamples && nSampleIdx<m_nBGSamples) { const uchar& nBGColor = m_voBGColorSamples[nSampleIdx].data[nPxIter]; { const size_t nColorDist = L1dist(nCurrColor,nBGColor); if(nColorDist>nCurrColorDistThreshold) goto failedcheck1ch; const ushort& nBGIntraDesc = *((ushort*)(m_voBGDescSamples[nSampleIdx].data+nDescIter)); const 
size_t nIntraDescDist = hdist(nCurrIntraDesc,nBGIntraDesc); LBSP::computeGrayscaleDescriptor(oInputImg,nBGColor,nCurrImgCoord_X,nCurrImgCoord_Y,m_anLBSPThreshold_8bitLUT[nBGColor],nCurrInterDesc); const size_t nInterDescDist = hdist(nCurrInterDesc,nBGIntraDesc); const size_t nDescDist = (nIntraDescDist+nInterDescDist)/2; if(nDescDist>nCurrDescDistThreshold) goto failedcheck1ch; const size_t nSumDist = std::min((nDescDist/4)*(s_nColorMaxDataRange_1ch/s_nDescMaxDataRange_1ch)+nColorDist,s_nColorMaxDataRange_1ch); if(nSumDist>nCurrColorDistThreshold) goto failedcheck1ch; if(nMinDescDist>nDescDist) nMinDescDist = nDescDist; if(nMinSumDist>nSumDist) nMinSumDist = nSumDist; nGoodSamplesCount++; } failedcheck1ch: nSampleIdx++; } const float fNormalizedLastDist = ((float)L1dist(nLastColor,nCurrColor)/s_nColorMaxDataRange_1ch+(float)hdist(nLastIntraDesc,nCurrIntraDesc)/s_nDescMaxDataRange_1ch)/2; *pfCurrMeanLastDist = (*pfCurrMeanLastDist)*(1.0f-fRollAvgFactor_ST) + fNormalizedLastDist*fRollAvgFactor_ST; if(nGoodSamplesCount<m_nRequiredBGSamples) { // == foreground const float fNormalizedMinDist = std::min(1.0f,((float)nMinSumDist/s_nColorMaxDataRange_1ch+(float)nMinDescDist/s_nDescMaxDataRange_1ch)/2 + (float)(m_nRequiredBGSamples-nGoodSamplesCount)/m_nRequiredBGSamples); *pfCurrMeanMinDist_LT = (*pfCurrMeanMinDist_LT)*(1.0f-fRollAvgFactor_LT) + fNormalizedMinDist*fRollAvgFactor_LT; *pfCurrMeanMinDist_ST = (*pfCurrMeanMinDist_ST)*(1.0f-fRollAvgFactor_ST) + fNormalizedMinDist*fRollAvgFactor_ST; *pfCurrMeanRawSegmRes_LT = (*pfCurrMeanRawSegmRes_LT)*(1.0f-fRollAvgFactor_LT) + fRollAvgFactor_LT; *pfCurrMeanRawSegmRes_ST = (*pfCurrMeanRawSegmRes_ST)*(1.0f-fRollAvgFactor_ST) + fRollAvgFactor_ST; oCurrFGMask.data[nPxIter] = UCHAR_MAX; if(m_nModelResetCooldown && (rand()%(size_t)FEEDBACK_T_LOWER)==0) { const size_t s_rand = rand()%m_nBGSamples; *((ushort*)(m_voBGDescSamples[s_rand].data+nDescIter)) = nCurrIntraDesc; m_voBGColorSamples[s_rand].data[nPxIter] = nCurrColor; } } else { 
// == background const float fNormalizedMinDist = ((float)nMinSumDist/s_nColorMaxDataRange_1ch+(float)nMinDescDist/s_nDescMaxDataRange_1ch)/2; *pfCurrMeanMinDist_LT = (*pfCurrMeanMinDist_LT)*(1.0f-fRollAvgFactor_LT) + fNormalizedMinDist*fRollAvgFactor_LT; *pfCurrMeanMinDist_ST = (*pfCurrMeanMinDist_ST)*(1.0f-fRollAvgFactor_ST) + fNormalizedMinDist*fRollAvgFactor_ST; *pfCurrMeanRawSegmRes_LT = (*pfCurrMeanRawSegmRes_LT)*(1.0f-fRollAvgFactor_LT); *pfCurrMeanRawSegmRes_ST = (*pfCurrMeanRawSegmRes_ST)*(1.0f-fRollAvgFactor_ST); const size_t nLearningRate = learningRateOverride>0?(size_t)ceil(learningRateOverride):(size_t)ceil(*pfCurrLearningRate); if((rand()%nLearningRate)==0) { const size_t s_rand = rand()%m_nBGSamples; *((ushort*)(m_voBGDescSamples[s_rand].data+nDescIter)) = nCurrIntraDesc; m_voBGColorSamples[s_rand].data[nPxIter] = nCurrColor; } int nSampleImgCoord_Y, nSampleImgCoord_X; const bool bCurrUsing3x3Spread = m_bUse3x3Spread && !m_oUnstableRegionMask.data[nPxIter]; if(bCurrUsing3x3Spread) getRandNeighborPosition_3x3(nSampleImgCoord_X,nSampleImgCoord_Y,nCurrImgCoord_X,nCurrImgCoord_Y,LBSP::PATCH_SIZE/2,m_oImgSize); else getRandNeighborPosition_5x5(nSampleImgCoord_X,nSampleImgCoord_Y,nCurrImgCoord_X,nCurrImgCoord_Y,LBSP::PATCH_SIZE/2,m_oImgSize); const size_t n_rand = rand(); const size_t idx_rand_uchar = m_oImgSize.width*nSampleImgCoord_Y + nSampleImgCoord_X; const size_t idx_rand_flt32 = idx_rand_uchar*4; const float fRandMeanLastDist = *((float*)(m_oMeanLastDistFrame.data+idx_rand_flt32)); const float fRandMeanRawSegmRes = *((float*)(m_oMeanRawSegmResFrame_ST.data+idx_rand_flt32)); if((n_rand%(bCurrUsing3x3Spread?nLearningRate:(nLearningRate/2+1)))==0 || (fRandMeanRawSegmRes>GHOSTDET_S_MIN && fRandMeanLastDist<GHOSTDET_D_MAX && (n_rand%((size_t)m_fCurrLearningRateLowerCap))==0)) { const size_t idx_rand_ushrt = idx_rand_uchar*2; const size_t s_rand = rand()%m_nBGSamples; *((ushort*)(m_voBGDescSamples[s_rand].data+idx_rand_ushrt)) = nCurrIntraDesc; 
m_voBGColorSamples[s_rand].data[idx_rand_uchar] = nCurrColor; } } if(m_oLastFGMask.data[nPxIter] || (std::min(*pfCurrMeanMinDist_LT,*pfCurrMeanMinDist_ST)<UNSTABLE_REG_RATIO_MIN && oCurrFGMask.data[nPxIter])) { if((*pfCurrLearningRate)<m_fCurrLearningRateUpperCap) *pfCurrLearningRate += FEEDBACK_T_INCR/(std::max(*pfCurrMeanMinDist_LT,*pfCurrMeanMinDist_ST)*(*pfCurrVariationFactor)); } else if((*pfCurrLearningRate)>m_fCurrLearningRateLowerCap) *pfCurrLearningRate -= FEEDBACK_T_DECR*(*pfCurrVariationFactor)/std::max(*pfCurrMeanMinDist_LT,*pfCurrMeanMinDist_ST); if((*pfCurrLearningRate)<m_fCurrLearningRateLowerCap) *pfCurrLearningRate = m_fCurrLearningRateLowerCap; else if((*pfCurrLearningRate)>m_fCurrLearningRateUpperCap) *pfCurrLearningRate = m_fCurrLearningRateUpperCap; if(std::max(*pfCurrMeanMinDist_LT,*pfCurrMeanMinDist_ST)>UNSTABLE_REG_RATIO_MIN && m_oBlinksFrame.data[nPxIter]) (*pfCurrVariationFactor) += FEEDBACK_V_INCR; else if((*pfCurrVariationFactor)>FEEDBACK_V_DECR) { (*pfCurrVariationFactor) -= m_oLastFGMask.data[nPxIter]?FEEDBACK_V_DECR/4:m_oUnstableRegionMask.data[nPxIter]?FEEDBACK_V_DECR/2:FEEDBACK_V_DECR; if((*pfCurrVariationFactor)<FEEDBACK_V_DECR) (*pfCurrVariationFactor) = FEEDBACK_V_DECR; } if((*pfCurrDistThresholdFactor)<std::pow(1.0f+std::min(*pfCurrMeanMinDist_LT,*pfCurrMeanMinDist_ST)*2,2)) (*pfCurrDistThresholdFactor) += FEEDBACK_R_VAR*(*pfCurrVariationFactor-FEEDBACK_V_DECR); else { (*pfCurrDistThresholdFactor) -= FEEDBACK_R_VAR/(*pfCurrVariationFactor); if((*pfCurrDistThresholdFactor)<1.0f) (*pfCurrDistThresholdFactor) = 1.0f; } if(popcount(nCurrIntraDesc)>=2) ++nNonZeroDescCount; nLastIntraDesc = nCurrIntraDesc; nLastColor = nCurrColor; } } else { //m_nImgChannels==3 for(size_t nModelIter=0; nModelIter<m_nTotRelevantPxCount; ++nModelIter) { const size_t nPxIter = m_aPxIdxLUT[nModelIter]; const int nCurrImgCoord_X = m_aPxInfoLUT[nPxIter].nImgCoord_X; const int nCurrImgCoord_Y = m_aPxInfoLUT[nPxIter].nImgCoord_Y; const size_t nPxIterRGB = 
nPxIter*3; const size_t nDescIterRGB = nPxIterRGB*2; const size_t nFloatIter = nPxIter*4; const uchar* const anCurrColor = oInputImg.data+nPxIterRGB; size_t nMinTotDescDist=s_nDescMaxDataRange_3ch; size_t nMinTotSumDist=s_nColorMaxDataRange_3ch; float* pfCurrDistThresholdFactor = (float*)(m_oDistThresholdFrame.data+nFloatIter); float* pfCurrVariationFactor = (float*)(m_oVariationModulatorFrame.data+nFloatIter); float* pfCurrLearningRate = ((float*)(m_oUpdateRateFrame.data+nFloatIter)); float* pfCurrMeanLastDist = ((float*)(m_oMeanLastDistFrame.data+nFloatIter)); float* pfCurrMeanMinDist_LT = ((float*)(m_oMeanMinDistFrame_LT.data+nFloatIter)); float* pfCurrMeanMinDist_ST = ((float*)(m_oMeanMinDistFrame_ST.data+nFloatIter)); float* pfCurrMeanRawSegmRes_LT = ((float*)(m_oMeanRawSegmResFrame_LT.data+nFloatIter)); float* pfCurrMeanRawSegmRes_ST = ((float*)(m_oMeanRawSegmResFrame_ST.data+nFloatIter)); float* pfCurrMeanFinalSegmRes_LT = ((float*)(m_oMeanFinalSegmResFrame_LT.data+nFloatIter)); float* pfCurrMeanFinalSegmRes_ST = ((float*)(m_oMeanFinalSegmResFrame_ST.data+nFloatIter)); ushort* anLastIntraDesc = ((ushort*)(m_oLastDescFrame.data+nDescIterRGB)); uchar* anLastColor = m_oLastColorFrame.data+nPxIterRGB; const size_t nCurrColorDistThreshold = (size_t)(((*pfCurrDistThresholdFactor)*m_nMinColorDistThreshold)-((!m_oUnstableRegionMask.data[nPxIter])*STAB_COLOR_DIST_OFFSET)); const size_t nCurrDescDistThreshold = ((size_t)1<<((size_t)floor(*pfCurrDistThresholdFactor+0.5f)))+m_nDescDistThresholdOffset+(m_oUnstableRegionMask.data[nPxIter]*UNSTAB_DESC_DIST_OFFSET); const size_t nCurrTotColorDistThreshold = nCurrColorDistThreshold*3; const size_t nCurrTotDescDistThreshold = nCurrDescDistThreshold*3; const size_t nCurrSCColorDistThreshold = nCurrTotColorDistThreshold/2; ushort anCurrInterDesc[3], anCurrIntraDesc[3]; const size_t anCurrIntraLBSPThresholds[3] = 
{m_anLBSPThreshold_8bitLUT[anCurrColor[0]],m_anLBSPThreshold_8bitLUT[anCurrColor[1]],m_anLBSPThreshold_8bitLUT[anCurrColor[2]]}; LBSP::computeRGBDescriptor(oInputImg,anCurrColor,nCurrImgCoord_X,nCurrImgCoord_Y,anCurrIntraLBSPThresholds,anCurrIntraDesc); m_oUnstableRegionMask.data[nPxIter] = ((*pfCurrDistThresholdFactor)>UNSTABLE_REG_RDIST_MIN || (*pfCurrMeanRawSegmRes_LT-*pfCurrMeanFinalSegmRes_LT)>UNSTABLE_REG_RATIO_MIN || (*pfCurrMeanRawSegmRes_ST-*pfCurrMeanFinalSegmRes_ST)>UNSTABLE_REG_RATIO_MIN)?1:0; size_t nGoodSamplesCount=0, nSampleIdx=0; while(nGoodSamplesCount<m_nRequiredBGSamples && nSampleIdx<m_nBGSamples) { const ushort* const anBGIntraDesc = (ushort*)(m_voBGDescSamples[nSampleIdx].data+nDescIterRGB); const uchar* const anBGColor = m_voBGColorSamples[nSampleIdx].data+nPxIterRGB; size_t nTotDescDist = 0; size_t nTotSumDist = 0; for(size_t c=0;c<3; ++c) { const size_t nColorDist = L1dist(anCurrColor[c],anBGColor[c]); if(nColorDist>nCurrSCColorDistThreshold) goto failedcheck3ch; const size_t nIntraDescDist = hdist(anCurrIntraDesc[c],anBGIntraDesc[c]); LBSP::computeSingleRGBDescriptor(oInputImg,anBGColor[c],nCurrImgCoord_X,nCurrImgCoord_Y,c,m_anLBSPThreshold_8bitLUT[anBGColor[c]],anCurrInterDesc[c]); const size_t nInterDescDist = hdist(anCurrInterDesc[c],anBGIntraDesc[c]); const size_t nDescDist = (nIntraDescDist+nInterDescDist)/2; const size_t nSumDist = std::min((nDescDist/2)*(s_nColorMaxDataRange_1ch/s_nDescMaxDataRange_1ch)+nColorDist,s_nColorMaxDataRange_1ch); if(nSumDist>nCurrSCColorDistThreshold) goto failedcheck3ch; nTotDescDist += nDescDist; nTotSumDist += nSumDist; } if(nTotDescDist>nCurrTotDescDistThreshold || nTotSumDist>nCurrTotColorDistThreshold) goto failedcheck3ch; if(nMinTotDescDist>nTotDescDist) nMinTotDescDist = nTotDescDist; if(nMinTotSumDist>nTotSumDist) nMinTotSumDist = nTotSumDist; nGoodSamplesCount++; failedcheck3ch: nSampleIdx++; } const float fNormalizedLastDist = 
((float)L1dist<3>(anLastColor,anCurrColor)/s_nColorMaxDataRange_3ch+(float)hdist<3>(anLastIntraDesc,anCurrIntraDesc)/s_nDescMaxDataRange_3ch)/2; *pfCurrMeanLastDist = (*pfCurrMeanLastDist)*(1.0f-fRollAvgFactor_ST) + fNormalizedLastDist*fRollAvgFactor_ST; if(nGoodSamplesCount<m_nRequiredBGSamples) { // == foreground const float fNormalizedMinDist = std::min(1.0f,((float)nMinTotSumDist/s_nColorMaxDataRange_3ch+(float)nMinTotDescDist/s_nDescMaxDataRange_3ch)/2 + (float)(m_nRequiredBGSamples-nGoodSamplesCount)/m_nRequiredBGSamples); *pfCurrMeanMinDist_LT = (*pfCurrMeanMinDist_LT)*(1.0f-fRollAvgFactor_LT) + fNormalizedMinDist*fRollAvgFactor_LT; *pfCurrMeanMinDist_ST = (*pfCurrMeanMinDist_ST)*(1.0f-fRollAvgFactor_ST) + fNormalizedMinDist*fRollAvgFactor_ST; *pfCurrMeanRawSegmRes_LT = (*pfCurrMeanRawSegmRes_LT)*(1.0f-fRollAvgFactor_LT) + fRollAvgFactor_LT; *pfCurrMeanRawSegmRes_ST = (*pfCurrMeanRawSegmRes_ST)*(1.0f-fRollAvgFactor_ST) + fRollAvgFactor_ST; oCurrFGMask.data[nPxIter] = UCHAR_MAX; if(m_nModelResetCooldown && (rand()%(size_t)FEEDBACK_T_LOWER)==0) { const size_t s_rand = rand()%m_nBGSamples; for(size_t c=0; c<3; ++c) { *((ushort*)(m_voBGDescSamples[s_rand].data+nDescIterRGB+2*c)) = anCurrIntraDesc[c]; *(m_voBGColorSamples[s_rand].data+nPxIterRGB+c) = anCurrColor[c]; } } } else { // == background const float fNormalizedMinDist = ((float)nMinTotSumDist/s_nColorMaxDataRange_3ch+(float)nMinTotDescDist/s_nDescMaxDataRange_3ch)/2; *pfCurrMeanMinDist_LT = (*pfCurrMeanMinDist_LT)*(1.0f-fRollAvgFactor_LT) + fNormalizedMinDist*fRollAvgFactor_LT; *pfCurrMeanMinDist_ST = (*pfCurrMeanMinDist_ST)*(1.0f-fRollAvgFactor_ST) + fNormalizedMinDist*fRollAvgFactor_ST; *pfCurrMeanRawSegmRes_LT = (*pfCurrMeanRawSegmRes_LT)*(1.0f-fRollAvgFactor_LT); *pfCurrMeanRawSegmRes_ST = (*pfCurrMeanRawSegmRes_ST)*(1.0f-fRollAvgFactor_ST); const size_t nLearningRate = learningRateOverride>0?(size_t)ceil(learningRateOverride):(size_t)ceil(*pfCurrLearningRate); if((rand()%nLearningRate)==0) { const 
size_t s_rand = rand()%m_nBGSamples; for(size_t c=0; c<3; ++c) { *((ushort*)(m_voBGDescSamples[s_rand].data+nDescIterRGB+2*c)) = anCurrIntraDesc[c]; *(m_voBGColorSamples[s_rand].data+nPxIterRGB+c) = anCurrColor[c]; } } int nSampleImgCoord_Y, nSampleImgCoord_X; const bool bCurrUsing3x3Spread = m_bUse3x3Spread && !m_oUnstableRegionMask.data[nPxIter]; if(bCurrUsing3x3Spread) getRandNeighborPosition_3x3(nSampleImgCoord_X,nSampleImgCoord_Y,nCurrImgCoord_X,nCurrImgCoord_Y,LBSP::PATCH_SIZE/2,m_oImgSize); else getRandNeighborPosition_5x5(nSampleImgCoord_X,nSampleImgCoord_Y,nCurrImgCoord_X,nCurrImgCoord_Y,LBSP::PATCH_SIZE/2,m_oImgSize); const size_t n_rand = rand(); const size_t idx_rand_uchar = m_oImgSize.width*nSampleImgCoord_Y + nSampleImgCoord_X; const size_t idx_rand_flt32 = idx_rand_uchar*4; const float fRandMeanLastDist = *((float*)(m_oMeanLastDistFrame.data+idx_rand_flt32)); const float fRandMeanRawSegmRes = *((float*)(m_oMeanRawSegmResFrame_ST.data+idx_rand_flt32)); if((n_rand%(bCurrUsing3x3Spread?nLearningRate:(nLearningRate/2+1)))==0 || (fRandMeanRawSegmRes>GHOSTDET_S_MIN && fRandMeanLastDist<GHOSTDET_D_MAX && (n_rand%((size_t)m_fCurrLearningRateLowerCap))==0)) { const size_t idx_rand_uchar_rgb = idx_rand_uchar*3; const size_t idx_rand_ushrt_rgb = idx_rand_uchar_rgb*2; const size_t s_rand = rand()%m_nBGSamples; for(size_t c=0; c<3; ++c) { *((ushort*)(m_voBGDescSamples[s_rand].data+idx_rand_ushrt_rgb+2*c)) = anCurrIntraDesc[c]; *(m_voBGColorSamples[s_rand].data+idx_rand_uchar_rgb+c) = anCurrColor[c]; } } } if(m_oLastFGMask.data[nPxIter] || (std::min(*pfCurrMeanMinDist_LT,*pfCurrMeanMinDist_ST)<UNSTABLE_REG_RATIO_MIN && oCurrFGMask.data[nPxIter])) { if((*pfCurrLearningRate)<m_fCurrLearningRateUpperCap) *pfCurrLearningRate += FEEDBACK_T_INCR/(std::max(*pfCurrMeanMinDist_LT,*pfCurrMeanMinDist_ST)*(*pfCurrVariationFactor)); } else if((*pfCurrLearningRate)>m_fCurrLearningRateLowerCap) *pfCurrLearningRate -= 
FEEDBACK_T_DECR*(*pfCurrVariationFactor)/std::max(*pfCurrMeanMinDist_LT,*pfCurrMeanMinDist_ST); if((*pfCurrLearningRate)<m_fCurrLearningRateLowerCap) *pfCurrLearningRate = m_fCurrLearningRateLowerCap; else if((*pfCurrLearningRate)>m_fCurrLearningRateUpperCap) *pfCurrLearningRate = m_fCurrLearningRateUpperCap; if(std::max(*pfCurrMeanMinDist_LT,*pfCurrMeanMinDist_ST)>UNSTABLE_REG_RATIO_MIN && m_oBlinksFrame.data[nPxIter]) (*pfCurrVariationFactor) += FEEDBACK_V_INCR; else if((*pfCurrVariationFactor)>FEEDBACK_V_DECR) { (*pfCurrVariationFactor) -= m_oLastFGMask.data[nPxIter]?FEEDBACK_V_DECR/4:m_oUnstableRegionMask.data[nPxIter]?FEEDBACK_V_DECR/2:FEEDBACK_V_DECR; if((*pfCurrVariationFactor)<FEEDBACK_V_DECR) (*pfCurrVariationFactor) = FEEDBACK_V_DECR; } if((*pfCurrDistThresholdFactor)<std::pow(1.0f+std::min(*pfCurrMeanMinDist_LT,*pfCurrMeanMinDist_ST)*2,2)) (*pfCurrDistThresholdFactor) += FEEDBACK_R_VAR*(*pfCurrVariationFactor-FEEDBACK_V_DECR); else { (*pfCurrDistThresholdFactor) -= FEEDBACK_R_VAR/(*pfCurrVariationFactor); if((*pfCurrDistThresholdFactor)<1.0f) (*pfCurrDistThresholdFactor) = 1.0f; } if(popcount<3>(anCurrIntraDesc)>=4) ++nNonZeroDescCount; for(size_t c=0; c<3; ++c) { anLastIntraDesc[c] = anCurrIntraDesc[c]; anLastColor[c] = anCurrColor[c]; } } } #if DISPLAY_SUBSENSE_DEBUG_INFO std::cout << std::endl; cv::Point dbgpt(nDebugCoordX,nDebugCoordY); cv::Mat oMeanMinDistFrameNormalized; m_oMeanMinDistFrame_ST.copyTo(oMeanMinDistFrameNormalized); cv::circle(oMeanMinDistFrameNormalized,dbgpt,5,cv::Scalar(1.0f)); cv::resize(oMeanMinDistFrameNormalized,oMeanMinDistFrameNormalized,DEFAULT_FRAME_SIZE); cv::imshow("d_min(x)",oMeanMinDistFrameNormalized); std::cout << std::fixed << std::setprecision(5) << " d_min(" << dbgpt << ") = " << m_oMeanMinDistFrame_ST.at<float>(dbgpt) << std::endl; cv::Mat oMeanLastDistFrameNormalized; m_oMeanLastDistFrame.copyTo(oMeanLastDistFrameNormalized); cv::circle(oMeanLastDistFrameNormalized,dbgpt,5,cv::Scalar(1.0f)); 
cv::resize(oMeanLastDistFrameNormalized,oMeanLastDistFrameNormalized,DEFAULT_FRAME_SIZE); cv::imshow("d_last(x)",oMeanLastDistFrameNormalized); std::cout << std::fixed << std::setprecision(5) << " d_last(" << dbgpt << ") = " << m_oMeanLastDistFrame.at<float>(dbgpt) << std::endl; cv::Mat oMeanRawSegmResFrameNormalized; m_oMeanRawSegmResFrame_ST.copyTo(oMeanRawSegmResFrameNormalized); cv::circle(oMeanRawSegmResFrameNormalized,dbgpt,5,cv::Scalar(1.0f)); cv::resize(oMeanRawSegmResFrameNormalized,oMeanRawSegmResFrameNormalized,DEFAULT_FRAME_SIZE); cv::imshow("s_avg(x)",oMeanRawSegmResFrameNormalized); std::cout << std::fixed << std::setprecision(5) << " s_avg(" << dbgpt << ") = " << m_oMeanRawSegmResFrame_ST.at<float>(dbgpt) << std::endl; cv::Mat oMeanFinalSegmResFrameNormalized; m_oMeanFinalSegmResFrame_ST.copyTo(oMeanFinalSegmResFrameNormalized); cv::circle(oMeanFinalSegmResFrameNormalized,dbgpt,5,cv::Scalar(1.0f)); cv::resize(oMeanFinalSegmResFrameNormalized,oMeanFinalSegmResFrameNormalized,DEFAULT_FRAME_SIZE); cv::imshow("z_avg(x)",oMeanFinalSegmResFrameNormalized); std::cout << std::fixed << std::setprecision(5) << " z_avg(" << dbgpt << ") = " << m_oMeanFinalSegmResFrame_ST.at<float>(dbgpt) << std::endl; cv::Mat oDistThresholdFrameNormalized; m_oDistThresholdFrame.convertTo(oDistThresholdFrameNormalized,CV_32FC1,0.25f,-0.25f); cv::circle(oDistThresholdFrameNormalized,dbgpt,5,cv::Scalar(1.0f)); cv::resize(oDistThresholdFrameNormalized,oDistThresholdFrameNormalized,DEFAULT_FRAME_SIZE); cv::imshow("r(x)",oDistThresholdFrameNormalized); std::cout << std::fixed << std::setprecision(5) << " r(" << dbgpt << ") = " << m_oDistThresholdFrame.at<float>(dbgpt) << std::endl; cv::Mat oVariationModulatorFrameNormalized; cv::normalize(m_oVariationModulatorFrame,oVariationModulatorFrameNormalized,0,255,cv::NORM_MINMAX,CV_8UC1); cv::circle(oVariationModulatorFrameNormalized,dbgpt,5,cv::Scalar(255)); 
cv::resize(oVariationModulatorFrameNormalized,oVariationModulatorFrameNormalized,DEFAULT_FRAME_SIZE); cv::imshow("v(x)",oVariationModulatorFrameNormalized); std::cout << std::fixed << std::setprecision(5) << " v(" << dbgpt << ") = " << m_oVariationModulatorFrame.at<float>(dbgpt) << std::endl; cv::Mat oUpdateRateFrameNormalized; m_oUpdateRateFrame.convertTo(oUpdateRateFrameNormalized,CV_32FC1,1.0f/FEEDBACK_T_UPPER,-FEEDBACK_T_LOWER/FEEDBACK_T_UPPER); cv::circle(oUpdateRateFrameNormalized,dbgpt,5,cv::Scalar(1.0f)); cv::resize(oUpdateRateFrameNormalized,oUpdateRateFrameNormalized,DEFAULT_FRAME_SIZE); cv::imshow("t(x)",oUpdateRateFrameNormalized); std::cout << std::fixed << std::setprecision(5) << " t(" << dbgpt << ") = " << m_oUpdateRateFrame.at<float>(dbgpt) << std::endl; #endif //DISPLAY_SUBSENSE_DEBUG_INFO cv::bitwise_xor(oCurrFGMask,m_oLastRawFGMask,m_oCurrRawFGBlinkMask); cv::bitwise_or(m_oCurrRawFGBlinkMask,m_oLastRawFGBlinkMask,m_oBlinksFrame); m_oCurrRawFGBlinkMask.copyTo(m_oLastRawFGBlinkMask); oCurrFGMask.copyTo(m_oLastRawFGMask); cv::morphologyEx(oCurrFGMask,m_oFGMask_PreFlood,cv::MORPH_CLOSE, m_defaultMorphologyKernel); m_oFGMask_PreFlood.copyTo(m_oFGMask_FloodedHoles); cv::floodFill(m_oFGMask_FloodedHoles,cv::Point(0,0),UCHAR_MAX); cv::bitwise_not(m_oFGMask_FloodedHoles,m_oFGMask_FloodedHoles); cv::erode(m_oFGMask_PreFlood,m_oFGMask_PreFlood,m_defaultMorphologyKernel,cv::Point(-1,-1),3); cv::bitwise_or(oCurrFGMask,m_oFGMask_FloodedHoles,oCurrFGMask); cv::bitwise_or(oCurrFGMask,m_oFGMask_PreFlood,oCurrFGMask); cv::medianBlur(oCurrFGMask,m_oLastFGMask,m_nMedianBlurKernelSize); cv::dilate(m_oLastFGMask,m_oLastFGMask_dilated,m_defaultMorphologyKernel,cv::Point(-1,-1),3); cv::bitwise_and(m_oBlinksFrame,m_oLastFGMask_dilated_inverted,m_oBlinksFrame); cv::bitwise_not(m_oLastFGMask_dilated,m_oLastFGMask_dilated_inverted); cv::bitwise_and(m_oBlinksFrame,m_oLastFGMask_dilated_inverted,m_oBlinksFrame); m_oLastFGMask.copyTo(oCurrFGMask); 
cv::addWeighted(m_oMeanFinalSegmResFrame_LT,(1.0f-fRollAvgFactor_LT),m_oLastFGMask,(1.0/UCHAR_MAX)*fRollAvgFactor_LT,0,m_oMeanFinalSegmResFrame_LT,CV_32F); cv::addWeighted(m_oMeanFinalSegmResFrame_ST,(1.0f-fRollAvgFactor_ST),m_oLastFGMask,(1.0/UCHAR_MAX)*fRollAvgFactor_ST,0,m_oMeanFinalSegmResFrame_ST,CV_32F); const float fCurrNonZeroDescRatio = (float)nNonZeroDescCount/m_nTotRelevantPxCount; if(fCurrNonZeroDescRatio<LBSPDESC_NONZERO_RATIO_MIN && m_fLastNonZeroDescRatio<LBSPDESC_NONZERO_RATIO_MIN) { for(size_t t=0; t<=UCHAR_MAX; ++t) if(m_anLBSPThreshold_8bitLUT[t]>cv::saturate_cast<uchar>(m_nLBSPThresholdOffset+ceil(t*m_fRelLBSPThreshold/4))) --m_anLBSPThreshold_8bitLUT[t]; } else if(fCurrNonZeroDescRatio>LBSPDESC_NONZERO_RATIO_MAX && m_fLastNonZeroDescRatio>LBSPDESC_NONZERO_RATIO_MAX) { for(size_t t=0; t<=UCHAR_MAX; ++t) if(m_anLBSPThreshold_8bitLUT[t]<cv::saturate_cast<uchar>(m_nLBSPThresholdOffset+UCHAR_MAX*m_fRelLBSPThreshold)) ++m_anLBSPThreshold_8bitLUT[t]; } m_fLastNonZeroDescRatio = fCurrNonZeroDescRatio; if(m_bLearningRateScalingEnabled) { cv::resize(oInputImg,m_oDownSampledFrame_MotionAnalysis,m_oDownSampledFrameSize,0,0,cv::INTER_AREA); cv::accumulateWeighted(m_oDownSampledFrame_MotionAnalysis,m_oMeanDownSampledLastDistFrame_LT,fRollAvgFactor_LT); cv::accumulateWeighted(m_oDownSampledFrame_MotionAnalysis,m_oMeanDownSampledLastDistFrame_ST,fRollAvgFactor_ST); size_t nTotColorDiff = 0; for(int i=0; i<m_oMeanDownSampledLastDistFrame_ST.rows; ++i) { const size_t idx1 = m_oMeanDownSampledLastDistFrame_ST.step.p[0]*i; for(int j=0; j<m_oMeanDownSampledLastDistFrame_ST.cols; ++j) { const size_t idx2 = idx1+m_oMeanDownSampledLastDistFrame_ST.step.p[1]*j; nTotColorDiff += (m_nImgChannels==1)? 
// NOTE(review): the first lines below continue a `?:` expression begun on the previous
// line (`nTotColorDiff += (m_nImgChannels==1)? ...`): per down-sampled pixel, accumulate
// the absolute difference between the short-term (ST) and long-term (LT) rolling-average
// frames — halved single-channel diff for grayscale, max over the three channels for RGB.
(size_t)fabs((*(float*)(m_oMeanDownSampledLastDistFrame_ST.data+idx2))-(*(float*)(m_oMeanDownSampledLastDistFrame_LT.data+idx2)))/2 :
//(m_nImgChannels==3)
std::max((size_t)fabs((*(float*)(m_oMeanDownSampledLastDistFrame_ST.data+idx2))-(*(float*)(m_oMeanDownSampledLastDistFrame_LT.data+idx2))),
         std::max((size_t)fabs((*(float*)(m_oMeanDownSampledLastDistFrame_ST.data+idx2+4))-(*(float*)(m_oMeanDownSampledLastDistFrame_LT.data+idx2+4))),
                  (size_t)fabs((*(float*)(m_oMeanDownSampledLastDistFrame_ST.data+idx2+8))-(*(float*)(m_oMeanDownSampledLastDistFrame_LT.data+idx2+8)))));
} }
// Mean ST-vs-LT color difference per down-sampled pixel; used as a frame-level
// "scene changed" indicator for the automatic model reset below.
const float fCurrColorDiffRatio = (float)nTotColorDiff/(m_oMeanDownSampledLastDistFrame_ST.rows*m_oMeanDownSampledLastDistFrame_ST.cols);
if(m_bAutoModelResetEnabled) {
    // Disable auto-reset once the scene has been stable for >1000 frames since the last reset.
    if(m_nFramesSinceLastReset>1000)
        m_bAutoModelResetEnabled = false;
    else if(fCurrColorDiffRatio>=FRAMELEVEL_MIN_COLOR_DIFF_THRESHOLD && m_nModelResetCooldown==0) {
        m_nFramesSinceLastReset = 0;
        refreshModel(0.1f); // reset 10% of the bg model
        m_nModelResetCooldown = m_nSamplesForMovingAvgs/4;
        m_oUpdateRateFrame = cv::Scalar(1.0f); // also reset the per-pixel update rates
    }
    else
        ++m_nFramesSinceLastReset;
}
// Re-arm auto-reset if the frame-level color difference spikes well above threshold.
else if(fCurrColorDiffRatio>=FRAMELEVEL_MIN_COLOR_DIFF_THRESHOLD*2) {
    m_nFramesSinceLastReset = 0;
    m_bAutoModelResetEnabled = true;
}
// Scale the global learning-rate caps down (bit-shift by half the diff ratio, floored at 1)
// when the scene is changing; otherwise restore the default FEEDBACK_T_* caps.
if(fCurrColorDiffRatio>=FRAMELEVEL_MIN_COLOR_DIFF_THRESHOLD/2) {
    m_fCurrLearningRateLowerCap = (float)std::max((int)FEEDBACK_T_LOWER>>(int)(fCurrColorDiffRatio/2),1);
    m_fCurrLearningRateUpperCap = (float)std::max((int)FEEDBACK_T_UPPER>>(int)(fCurrColorDiffRatio/2),1);
}
else {
    m_fCurrLearningRateLowerCap = FEEDBACK_T_LOWER;
    m_fCurrLearningRateUpperCap = FEEDBACK_T_UPPER;
}
if(m_nModelResetCooldown>0)
    --m_nModelResetCooldown;
} }

// Returns the current background model as an image: the per-pixel, per-channel mean of
// all m_nBGSamples stored color samples, converted to 8-bit.
// Precondition: the model must be initialized (CV_Assert).
void BackgroundSubtractorSuBSENSE::getBackgroundImage(cv::OutputArray backgroundImage) const {
    CV_Assert(m_bInitialized);
    cv::Mat oAvgBGImg = cv::Mat::zeros(m_oImgSize,CV_32FC((int)m_nImgChannels));
    for(size_t s=0; s<m_nBGSamples; ++s) {
        for(int y=0; y<m_oImgSize.height; ++y) {
            for(int x=0; x<m_oImgSize.width; ++x) {
                // byte offset of pixel (x,y) in the s-th color sample; *4 for the float accumulator
                const size_t idx_nimg = m_voBGColorSamples[s].step.p[0]*y + m_voBGColorSamples[s].step.p[1]*x;
                const size_t nFloatIter = idx_nimg*4;
                float* oAvgBgImgPtr = (float*)(oAvgBGImg.data+nFloatIter);
                const uchar* const oBGImgPtr = m_voBGColorSamples[s].data+idx_nimg;
                for(size_t c=0; c<m_nImgChannels; ++c)
                    oAvgBgImgPtr[c] += ((float)oBGImgPtr[c])/m_nBGSamples;
            } } }
    oAvgBGImg.convertTo(backgroundImage,CV_8U);
}

// Returns the averaged LBSP descriptor image of the background model: the per-pixel,
// per-channel mean of all stored descriptor samples, converted to 16-bit.
// Requires 2-byte LBSP descriptors (CV_Assert on LBSP::DESC_SIZE).
void BackgroundSubtractorSuBSENSE::getBackgroundDescriptorsImage(cv::OutputArray backgroundDescImage) const {
    CV_Assert(LBSP::DESC_SIZE==2);
    CV_Assert(m_bInitialized);
    cv::Mat oAvgBGDesc = cv::Mat::zeros(m_oImgSize,CV_32FC((int)m_nImgChannels));
    for(size_t n=0; n<m_voBGDescSamples.size(); ++n) {
        for(int y=0; y<m_oImgSize.height; ++y) {
            for(int x=0; x<m_oImgSize.width; ++x) {
                // byte offset of the (x,y) descriptor in the n-th sample; *2 for the float accumulator
                const size_t idx_ndesc = m_voBGDescSamples[n].step.p[0]*y + m_voBGDescSamples[n].step.p[1]*x;
                const size_t nFloatIter = idx_ndesc*2;
                float* oAvgBgDescPtr = (float*)(oAvgBGDesc.data+nFloatIter);
                const ushort* const oBGDescPtr = (ushort*)(m_voBGDescSamples[n].data+idx_ndesc);
                for(size_t c=0; c<m_nImgChannels; ++c)
                    oAvgBgDescPtr[c] += ((float)oBGDescPtr[c])/m_voBGDescSamples.size();
            } } }
    oAvgBGDesc.convertTo(backgroundDescImage,CV_16U);
}

// cv::BackgroundSubtractor-style entry point; simply forwards to the function-call operator.
void BackgroundSubtractorSuBSENSE::apply(cv::InputArray image, cv::OutputArray fgmask, double learningRateOverride) {
    (*this)(image, fgmask, learningRateOverride);
}
结合代码分析描述符阈值是如何动态计算的?
09-23
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额 3.43 元,前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝(查看规则)
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值