test_chol

本文介绍了一个使用C++实现的Cholesky分解算法示例,该示例利用了Eigen库进行矩阵运算,并通过读取.mat文件来获取输入矩阵。通过对比输出结果与预期结果之间的差异,验证了算法的有效性和准确性。
// 定义控制台应用程序的入口点。
//


#include "stdafx.h"
#include <mat.h>  
#include <cmath>
#include <iostream>
#include <iomanip>
#include <Eigen/Dense>
#include <Eigen/LU>
#include <Eigen/Cholesky>
using Eigen::MatrixXd;
using Eigen::VectorXd;
using namespace std;
using namespace Eigen;
void chol(MatrixXd P,double c,MatrixXd &A)
{

LLT<MatrixXd> lltOfP(P);
MatrixXd L=lltOfP.matrixL();        //产生下三角矩阵
A<<c*L;


}
void mat_read(MatrixXd &P,MatrixXd &c,MatrixXd &A)
{


MATFile *pmatFileP = NULL,*pmatFilec = NULL,*pmatFileA = NULL;     
mxArray *pMxArrayP = NULL,*pMxArrayc = NULL,*pMxArrayA = NULL;    
double *inputP,*inputc,*outputA; 




pmatFileP = matOpen("inputP.mat","r");    
pMxArrayP = matGetVariable(pmatFileP,"inputP");    
inputP = (double*) mxGetData(pMxArrayP);  
mwSize M_P = mxGetM(pMxArrayP);  
mwSize N_P = mxGetN(pMxArrayP);    
MatrixXd tempP(M_P,N_P);    
for (int i=0; i<M_P; i++)    
for (int j=0; j<N_P; j++)    
tempP(i,j) = inputP[M_P*j+i];   
P=tempP;
matClose(pmatFileP);    
mxFree(inputP);  
//cout<<"tempP"<<endl<<fixed<<setprecision(5)<<tempP<<endl;


pmatFilec = matOpen("inputc.mat","r");    
pMxArrayc = matGetVariable(pmatFilec,"inputc");    
inputc = (double*) mxGetData(pMxArrayc);    
mwSize M_c = mxGetM(pMxArrayc);  
mwSize N_c = mxGetN(pMxArrayc);    
MatrixXd tempc(M_c,N_c);    
for (int i=0; i<M_c; i++)    
for (int j=0; j<N_c; j++)    
tempc(i,j) = inputc[M_c*j+i];  
c=tempc;
matClose(pmatFilec);    
mxFree(inputc);  
//cout<<"tempc"<<endl<<fixed<<setprecision(5)<<tempc<<endl;


pmatFileA = matOpen("outputA.mat","r");    
pMxArrayA = matGetVariable(pmatFileA,"outputA");    
outputA = (double*) mxGetData(pMxArrayA);  
mwSize M_X = mxGetM(pMxArrayA);  
mwSize N_X = mxGetN(pMxArrayA);    
MatrixXd tempX(M_X,N_X);    
for (int i=0; i<M_X; i++)    
for (int j=0; j<N_X; j++)    
tempX(i,j) = outputA[M_X*j+i];  
A=tempX;
matClose(pmatFileA);    
mxFree(outputA);  
//cout<<"tempX"<<endl<<fixed<<setprecision(5)<<tempX<<endl;




}
void test_chol()
{
MatrixXd P,c,A;
mat_read(P,c,A);
int length=400;
int wideth=60;

MatrixXd inputP(3,3),outputA(3,3);
double inputc;
for(int ut=0;ut<length;++ut)
for(int uk=0;uk<wideth;++uk)
{
int index=ut*wideth+uk;
for(int i=0;i<3;++i)   /////从P中提取输入数据
for(int j=0;j<3;++j)
{
inputP(i,j)=P(index*3+i,j);
}
inputc=c(index,0);      //////从c中提取输入数据

chol(inputP,inputc,outputA);


/////检验outputX与matlab中X的输出的差异,如果大于1e-5,报错
double varience;
double permissibleError=1e-18;
for(int i=0;i<3;++i)
for(int j=0;j<3;++j)
{


varience=outputA(i,j)-A(index*3+i,j);
if(varience>=permissibleError)
{
printf("sigmas call error!\n");
printf("(%d,%d)th call error\n",ut,uk);
printf("outputA(%d,%d) error\n",i,j);
printf("outputA=%.6f,X=%.6f,varience=%.6f\n",outputA(i,j),A(index*3+i,j),varience);
return ;
}
}
}
printf("sigmas call success!\n");
}


// Console entry point (MSVC TCHAR-aware main): runs the Cholesky
// regression test defined above, then exits successfully.
int _tmain(int argc, _TCHAR* argv[])
{
test_chol();
return 0;
}
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import accuracy_score, recall_score, f1_score
import pandas as pd

# Load the UCI Cleveland heart-disease dataset.
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/heart-disease/processed.cleveland.data'
column_names = [
    'age', 'sex', 'cp', 'trestbps', 'chol', 'fbs', 'restecg', 'thalach',
    'exang', 'oldpeak', 'slope', 'ca', 'thal', 'target'
]
# BUG FIX: the original named the last column 'num' and then indexed
# data['target'], which raises KeyError. Additionally, missing values in
# this file are encoded as '?', which would leave 'ca'/'thal' as object
# columns and break StandardScaler, so they are parsed as NaN and dropped.
data = pd.read_csv(url, names=column_names, na_values='?').dropna()

X = data.drop('target', axis=1)
# The raw label is disease severity 0-4; binarize to presence/absence so the
# default (binary) recall_score / f1_score calls below are well-defined.
y = (data['target'] > 0).astype(int)

# Standardize the features.
standard = StandardScaler()
X = standard.fit_transform(X)

# Train/test split.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# SVM: train and evaluate.
svm_model = SVC()
svm_model.fit(X_train, y_train)
svm_predictions = svm_model.predict(X_test)
svm_accuracy = accuracy_score(y_test, svm_predictions)
svm_recall = recall_score(y_test, svm_predictions)
svm_f1 = f1_score(y_test, svm_predictions)
print(f"SVM 准确率: {svm_accuracy}")
print(f"SVM 召回率: {svm_recall}")
print(f"SVM F1 值: {svm_f1}")

# KNN: grid-search the neighbor count, then evaluate the best model.
param_grid = {'n_neighbors': [3, 5, 7, 9, 11]}
grid_search = GridSearchCV(KNeighborsClassifier(), param_grid, cv=5)
grid_search.fit(X_train, y_train)
best_k = grid_search.best_params_['n_neighbors']
print(f"最优 k 值: {best_k}")
knn_model = KNeighborsClassifier(n_neighbors=best_k)
knn_model.fit(X_train, y_train)
knn_predictions = knn_model.predict(X_test)
knn_accuracy = accuracy_score(y_test, knn_predictions)
knn_recall = recall_score(y_test, knn_predictions)
knn_f1 = f1_score(y_test, knn_predictions)
print(f"KNN 准确率: {knn_accuracy}")
print(f"KNN 召回率: {knn_recall}")
print(f"KNN F1 值: {knn_f1}")
11-14
import numpy as np
import pandas as pd

names = ['年龄', '性别', '类型', '静态血压', '血清胆甾醇', '空腹血糖', '静息心电图', '最大心率',
         '运动心绞痛', 'ST压低', 'ST段峰值斜率', '主要血管数', 'thal', '心脏病的诊断']
# BUG FIX: the original passed 'https://archive.ics.uci.edu/ml/datasets/Heart+Disease'
# to read_csv -- that URL is the dataset's HTML landing page, not data. The
# actual 14-column processed Cleveland CSV file is used instead.
data = pd.read_csv(
    'https://archive.ics.uci.edu/ml/machine-learning-databases/heart-disease/processed.cleveland.data',
    names=names)
data.head()

# Drop rows with missing values (encoded in the file as "?").
data = data.replace(to_replace="?", value=np.nan)
data.dropna(inplace=True)
data.head()


def data_cut1(data, name, bins, labels):
    """Discretize column `name` using explicit bin edges, preserving column order."""
    columnNum = data.columns.values.tolist().index(name)  # position of the column to replace
    sr = pd.cut(data[name], bins=bins, labels=labels)
    data.drop(name, axis=1, inplace=True)
    data.insert(columnNum, name, sr)


def data_cut2(data, name, boxNum, labels):
    """Discretize column `name` into `boxNum` equal-width bins, preserving column order."""
    columnNum = data.columns.values.tolist().index(name)  # position of the column to replace
    sr = pd.cut(data[name], boxNum, labels=labels)
    data.drop(name, axis=1, inplace=True)
    data.insert(columnNum, name, sr)


data_cut2(data, '年龄', 3, ['壮年', '中年', '老年'])
data_cut1(data, '静态血压', [0, 90, 140, 200], ['偏低', '正常', '偏高'])
data_cut1(data, '血清胆甾醇', [0, 200, 300, 400, 500], ['过低', '低', '中', '高'])
data_cut1(data, '最大心率', [0, 100, 170, 300], ['过慢', '正常', '过快'])
data_cut1(data, 'ST压低', [-0.1, 1, 3, 6], ['低', '正常', '高'])
data.head()

# Train/test split by row index.
data_train = data[data.index <= 250]
data_train.head()
data_test = data[data.index > 250]
data_test.head()
# Features (label column removed) and the corresponding labels.
data_test1 = data_test.drop({'心脏病的诊断'}, axis=1)
data_test1.head()
data_test_labels = data_test['心脏病的诊断']
data_test_labels.head()

# NOTE(review): bayesCalcP and bayesClassify are not defined in this snippet;
# they must be supplied elsewhere (presumably a hand-written naive-Bayes
# helper module) -- confirm before running.
P_Ci, P_XinCi, labels = bayesCalcP(data_train, '心脏病的诊断')
P_Ci

sum_right = 0  # correctly classified count
sum_wrong = 0  # misclassified count
# Convert the test set and its labels to plain Python lists.
data_test_list = data_test1.values.tolist()
data_test_labels_list = data_test_labels.values.tolist()
names = ['年龄', '性别', '类型', '静态血压', '血清胆甾醇', '空腹血糖', '静息心电图', '最大心率',
         '运动心绞痛', 'ST压低', 'ST段峰值斜率', '主要血管数', 'thal']
for i in range(len(data_test_list)):
    if bayesClassify(data_test_list[i], list(labels), names, P_Ci, P_XinCi) == data_test_labels_list[i]:
        sum_right += 1
    else:
        sum_wrong += 1
print("预测正确")
print("个数:", sum_right)
print("比例:", sum_right / len(data_test_list))
print("预测错误")
print("个数:", sum_wrong)
print("比例:", sum_wrong / len(data_test_list))
11-07
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import tkinter as tk
from tkinter import font
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, accuracy_score, precision_score, recall_score, f1_score
from sklearn.naive_bayes import GaussianNB
from pgmpy.models import DiscreteBayesianNetwork
from pgmpy.estimators import MaximumLikelihoodEstimator
from pgmpy.inference import VariableElimination


# ================= Chinese-font initialization ================= #
def init_gui():
    """Launch a Tkinter window (only used to verify Chinese font support)."""
    root = tk.Tk()
    custom_font = font.Font(family="SimHei", size=12)
    label = tk.Label(root, text="贝叶斯模型评估界面", font=custom_font)
    label.pack()
    return root


# Make Matplotlib render Chinese labels and minus signs correctly.
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False


# ================= Data loading & preprocessing ================= #
def load_data(file_path):
    """Load a CSV file; for heart.csv (detected via a 'target' column), bin float columns into 5 levels."""
    data = pd.read_csv(file_path)
    if 'target' in data.columns:
        for col in data.select_dtypes(include=['float64']).columns:
            data[col] = pd.cut(data[col], bins=5, labels=[0, 1, 2, 3, 4])
    return data


# ================= Model evaluation ================= #
def evaluate_model(y_true, y_pred, model_name):
    """Print accuracy/precision/recall/F1, plot the confusion matrix, and return the four scores."""
    cm = confusion_matrix(y_true, y_pred)
    acc = accuracy_score(y_true, y_pred)
    prec = precision_score(y_true, y_pred, average='weighted')
    rec = recall_score(y_true, y_pred, average='weighted')
    f1 = f1_score(y_true, y_pred, average='weighted')
    print(f"\n{model_name} 模型评估结果:")
    print(f"准确率: {acc:.4f}")
    print(f"精确率: {prec:.4f}")
    print(f"召回率: {rec:.4f}")
    print(f"F1分数: {f1:.4f}")
    plt.figure(figsize=(8, 6))
    sns.heatmap(cm, annot=True, fmt='d', cmap='Blues')
    plt.xlabel('预测标签')
    plt.ylabel('真实标签')
    plt.title(f'{model_name} 混淆矩阵')
    plt.show()
    return acc, prec, rec, f1


# ================= Discretization ================= #
def discretize_continuous_features(data, continuous_vars, bins=5):
    """Discretize the listed continuous columns into integer bin labels (modifies `data` in place)."""
    for var in continuous_vars:
        if var in data.columns:
            data[var] = pd.cut(data[var], bins=bins, labels=False)
    return data


# ================= Bayesian-network construction ================= #
def build_heart_bayesian_network(data):
    """Build a heart-disease Bayesian network from medically-motivated edges and fit it by MLE."""
    model_structure = [
        ('age', 'trestbps'), ('age', 'chol'), ('age', 'thalach'),
        ('sex', 'trestbps'), ('sex', 'chol'),
        ('cp', 'trestbps'), ('cp', 'chol'),
        ('trestbps', 'target'), ('chol', 'target'), ('thalach', 'target'),
        ('oldpeak', 'target'), ('slope', 'target'), ('ca', 'target'),
        ('thal', 'target'),
        ('fbs', 'chol'), ('fbs', 'target'),
        ('restecg', 'trestbps'), ('restecg', 'target'),
        ('exang', 'target')
    ]
    cols = data.columns.tolist()
    # Validate the structure against the actual columns before handing it to pgmpy.
    invalid_edges = [(u, v) for u, v in model_structure if u not in cols or v not in cols]
    if invalid_edges:
        raise ValueError(f"以下边包含不存在的节点: {invalid_edges}")
    model = DiscreteBayesianNetwork(model_structure)
    model.fit(data, estimator=MaximumLikelihoodEstimator)
    return model


def bayesian_network_classification(data, feature_cols, target_col):
    """Classify every row of `data` via MAP inference on the fitted network; returns evaluate_model() scores."""
    model = build_heart_bayesian_network(data)
    infer = VariableElimination(model)

    def predict(row):
        evidence = row[feature_cols].to_dict()
        try:
            query = infer.map_query(variables=[target_col], evidence=evidence)
            return query[target_col]
        # BUG FIX: the original bare `except:` also swallowed
        # KeyboardInterrupt/SystemExit; narrow to Exception.
        except Exception:
            return data[target_col].mode()[0]  # fall back to the majority class

    predictions = []
    for _, row in data.iterrows():
        predictions.append(predict(row))
    return evaluate_model(data[target_col], predictions, "贝叶斯网络")


# ================= Naive Bayes classifier ================= #
def naive_bayes_classification(X_train, X_test, y_train, y_test):
    """Train a Gaussian naive Bayes classifier and evaluate it on the test split."""
    model = GaussianNB()
    model.fit(X_train, y_train)
    y_pred = model.predict(X_test)
    return evaluate_model(y_test, y_pred, "朴素贝叶斯")


# ================= Main ================= #
if __name__ == "__main__":
    # Initialize the (optional) GUI font check.
    root = init_gui()

    # Data file paths -- adjust to the local environment.
    wine_file = r'E:\桌面\malearning\wine.csv'
    heart_file = r'E:\桌面\malearning\heart.csv'

    wine_data = load_data(wine_file)
    heart_data = load_data(heart_file)

    # === Heart dataset: discretize continuous columns, stringify the rest ===
    continuous_vars = ['age', 'trestbps', 'chol', 'thalach', 'oldpeak']
    heart_data = discretize_continuous_features(heart_data, continuous_vars)
    # NOTE(review): GaussianNB is later fed these string-typed columns, which
    # will fail at fit time -- confirm the intended dtype handling.
    for col in heart_data.columns:
        if col != 'target':
            heart_data[col] = heart_data[col].astype(str)

    # BUG FIX: the original bound this list to the name `list`, shadowing the builtin.
    wine_continuous_cols = ['Alcohol', 'Malic.acid', 'Ash', 'Acl', 'Mg', 'Phenols',
                            'Flavanoids', 'Nonflavanoid.phenols', 'Proanth',
                            'Color.int', 'Hue', 'OD', 'Proline']
    wine_data = discretize_continuous_features(wine_data, wine_continuous_cols)

    # === Wine dataset ===
    X_wine = wine_data.drop(['Wine'], axis=1)
    y_wine = wine_data['Wine']
    X_train, X_test, y_train, y_test = train_test_split(X_wine, y_wine, test_size=0.2, random_state=42)
    print("=== 朴素贝叶斯 - 葡萄酒数据集 ===")
    nb_acc, nb_prec, nb_rec, nb_f1 = naive_bayes_classification(X_train, X_test, y_train, y_test)

    print("\n=== 贝叶斯网络 - 葡萄酒数据集 ===")
    # Naive-Bayes-shaped network: the class node points to every feature.
    wine_features = wine_data.columns.tolist()[1:]
    wine_target = wine_data.columns.tolist()[0]
    wine_bn_structure = [(wine_target, feat) for feat in wine_features]
    wine_bn_model = DiscreteBayesianNetwork(wine_bn_structure)
    wine_bn_model.fit(wine_data, estimator=MaximumLikelihoodEstimator)
    infer = VariableElimination(wine_bn_model)

    def wine_predict(row):
        evidence = row[wine_features].to_dict()
        try:
            query = infer.map_query(variables=[wine_target], evidence=evidence)
            return query[wine_target]
        # BUG FIX: narrowed from a bare `except:`.
        except Exception:
            return wine_data[wine_target].mode()[0]

    wine_predictions = [wine_predict(row) for _, row in wine_data.iterrows()]
    bn_acc, bn_prec, bn_rec, bn_f1 = evaluate_model(wine_data[wine_target], wine_predictions, "贝叶斯网络")

    # === Heart dataset ===
    X_heart = heart_data.drop(['target'], axis=1)
    y_heart = heart_data['target']
    X_h_train, X_h_test, y_h_train, y_h_test = train_test_split(X_heart, y_heart, test_size=0.2, random_state=42)
    print("\n=== 朴素贝叶斯 - 心脏病数据集 ===")
    heart_nb_acc, heart_nb_prec, heart_nb_rec, heart_nb_f1 = naive_bayes_classification(
        X_h_train, X_h_test, y_h_train, y_h_test
    )
    print("\n=== 贝叶斯网络 - 心脏病数据集 ===")
    heart_bn_acc, heart_bn_prec, heart_bn_rec, heart_bn_f1 = bayesian_network_classification(
        heart_data, X_heart.columns.tolist(), 'target'
    )

    # === Compare both models on both datasets ===
    models = ['朴素贝叶斯', '贝叶斯网络']
    metrics = ['准确率', '精确率', '召回率', 'F1分数']
    wine_scores = [[nb_acc, nb_prec, nb_rec, nb_f1], [bn_acc, bn_prec, bn_rec, bn_f1]]
    heart_scores = [[heart_nb_acc, heart_nb_prec, heart_nb_rec, heart_nb_f1],
                    [heart_bn_acc, heart_bn_prec, heart_bn_rec, heart_bn_f1]]
    fig, axes = plt.subplots(1, 2, figsize=(14, 6))
    x = np.arange(len(metrics))
    width = 0.35
    # Wine dataset panel.
    axes[0].bar(x - width/2, wine_scores[0], width, label='朴素贝叶斯')
    axes[0].bar(x + width/2, wine_scores[1], width, label='贝叶斯网络')
    axes[0].set_title('葡萄酒数据集表现')
    axes[0].set_ylabel('分数')
    axes[0].set_xticks(x, metrics)
    axes[0].legend()
    # Heart dataset panel.
    axes[1].bar(x - width/2, heart_scores[0], width, label='朴素贝叶斯')
    axes[1].bar(x + width/2, heart_scores[1], width, label='贝叶斯网络')
    axes[1].set_title('心脏病数据集表现')
    axes[1].set_ylabel('分数')
    axes[1].set_xticks(x, metrics)
    axes[1].legend()
    plt.tight_layout()
    plt.show()
05-26
【无人机】基于改进粒子群算法的无人机路径规划研究[和遗传算法、粒子群算法进行比较](Matlab代码实现)内容概要:本文围绕基于改进粒子群算法的无人机路径规划展开研究,重点探讨了在复杂环境中利用改进粒子群算法(PSO)实现无人机三维路径规划的方法,并将其与遗传算法(GA)、标准粒子群算法等传统优化算法进行对比分析。研究内容涵盖路径规划的多目标优化、避障策略、航路点约束以及算法收敛性和寻优能力的评估,所有实验均通过Matlab代码实现,提供了完整的仿真验证流程。文章还提到了多种智能优化算法在无人机路径规划中的应用比较,突出了改进PSO在收敛速度和全局寻优方面的优势。; 适合人群:具备一定Matlab编程基础和优化算法知识的研究生、科研人员及从事无人机路径规划、智能优化算法研究的相关技术人员。; 使用场景及目标:①用于无人机在复杂地形或动态环境下的三维路径规划仿真研究;②比较不同智能优化算法(如PSO、GA、蚁群算法、RRT等)在路径规划中的性能差异;③为多目标优化问题提供算法选型和改进思路。; 阅读建议:建议读者结合文中提供的Matlab代码进行实践操作,重点关注算法的参数设置、适应度函数设计及路径约束处理方式,同时可参考文中提到的多种算法对比思路,拓展到其他智能优化算法的研究与改进中。
评论
成就一亿技术人!
拼手气红包6.0元
还能输入1000个字符
 
红包 添加红包
表情包 插入表情
 条评论被折叠 查看
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值