Basic algorithms in MATLAB

Common algorithms

kNN

% kNN classification demo using fitcknn
% clc; close all; clear
% Three Gaussian training classes. (The original drew classes 2 and 3 from
% the same distribution, which looks like a copy-paste slip; class 3 is
% given its own mean here so the three classes are actually distinct.)
training = [mvnrnd([2 2],eye(2),50); mvnrnd([-2 -2],2*eye(2),50); mvnrnd([2 -2],eye(2),50)];
group = [ones(50,1); 2*ones(50,1); 3*ones(50,1)];
gscatter(training(:,1),training(:,2),group,'rcb','*x+');
hold on;
sample = unifrnd(-3,3,20,2);     % 20 random query points in [-3,3]^2
K = 3;                           % number of neighbors
Mdl = fitcknn(training,group,'NumNeighbors',K);
ck = predict(Mdl,sample);        % predicted labels for the query points
gscatter(sample(:,1),sample(:,2),ck,'rcb','ods');

7.6 Write a program implementing the AODE classifier, and use watermelon dataset 3.0 as the training set to classify the "Test 1" sample on p.151.
Answer: the code is attached below.
Implementing the AODE classifier requires equations (7.23)–(7.25) to compute the various probability values. Since watermelon dataset 3.0 contains continuous attributes, equations (7.24) and (7.25) have to be extended accordingly.
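For reference, the discrete forms of those equations (in the textbook's notation: $D_{c,x_i}$ is the set of class-$c$ samples taking value $x_i$ on attribute $i$, $N$ is the number of classes, $N_i$ is the number of possible values of attribute $i$, and $m'$ is the minimum sample count required of a parent attribute) are:

$$P(c \mid \boldsymbol{x}) \propto \sum_{\substack{i=1 \\ |D_{x_i}| \ge m'}}^{d} P(c, x_i) \prod_{j=1}^{d} P(x_j \mid c, x_i) \tag{7.23}$$

$$\hat{P}(c, x_i) = \frac{|D_{c,x_i}| + 1}{|D| + N \times N_i} \tag{7.24}$$

$$\hat{P}(x_j \mid c, x_i) = \frac{|D_{c,x_i,x_j}| + 1}{|D_{c,x_i}| + N_j} \tag{7.25}$$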

For (7.24), if $x_i$ is a continuous attribute, it is computed as

$$\hat{P}(c, x_i) = \hat{P}(c)\, p(x_i \mid c)$$

where $p(x_i \mid c)$ is the probability density function of a normal distribution with parameters $\mu_{c,i}$ and $\sigma_{c,i}^2$, the mean and variance of attribute $i$ over the class-$c$ samples.
For (7.25) there are several cases:

(a) If $x_i$ is discrete and $x_j$ is continuous:

$$\hat{P}(x_j \mid c, x_i) = \frac{1}{\sqrt{2\pi}\,\sigma}\exp\!\left(-\frac{(x_j-\mu)^2}{2\sigma^2}\right)$$

with $\mu$ and $\sigma$ estimated from attribute $j$ over the subset $D_{c,x_i}$.

(b) If $x_i$ is continuous and $x_j$ is discrete, then by Bayes' rule:

$$\hat{P}(x_j \mid c, x_i) = \frac{p(x_i \mid c, x_j)\,\hat{P}(c, x_j)}{\hat{P}(c, x_i)}$$

where $p(x_i \mid c, x_j)$ is a normal density estimated over the subset $D_{c,x_j}$.

(c) If both $x_i$ and $x_j$ are continuous:

$$\hat{P}(x_j \mid c, x_i) = \frac{p(x_i, x_j \mid c)\,\hat{P}(c)}{\hat{P}(c, x_i)}$$

where $p(x_i, x_j \mid c)$ is a bivariate normal density whose mean vector and covariance matrix are estimated from the class-$c$ samples.
Note that in cases (a) and (b) the normal-distribution parameters may be impossible to estimate because the subset contains too few samples. In the code, if the subset is empty a standard normal distribution is assumed, i.e. $\mu = 0$, $\sigma = 1$; if it contains exactly one sample, the mean is set to that single value and the variance to 1.
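As a minimal sketch, this fallback can be isolated in a helper such as the following (the name gauss_params is illustrative; the classifier below inlines the same logic):

function [u, sigma] = gauss_params(v)
% Estimate normal-distribution parameters from a possibly tiny sample v.
% Fallback: no samples -> standard normal; one sample -> mean = that value, sigma = 1.
if isempty(v)
    u = 0;  sigma = 1;
elseif numel(v) == 1
    u = v(1);  sigma = 1;
else
    u = mean(v);  sigma = std(v);
end
end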

function predict  = aoop(x, X, Y)
%      Averaged one-dependent estimator (AODE) classifier
%      x: sample to classify
%      X: training-sample feature values
%      Y: training-sample labels
%      laplace: whether to apply Laplacian correction (1 = yes, the default)
%      mmin: minimum number of samples required of a parent attribute (m')
laplace = 1;
mmin = 3;
XX = cell2table(X);
C = unique(Y);  % all possible labels
N = zeros(1,size(x,2));
%% number of distinct values of each attribute
for i = 1:size(x,2)
    N(i) = size(unique(XX(:,i)),1);
end
%%
p = zeros(size(C,1),1);  % stores the score of each class
% disp('=============== Averaged one-dependent estimator (AODE) ===============')
for i = 1:size(C,1)

    c = C(i);
    % -------- class prior probability P(c) --------
    Xc = X(Y==c,:);            % class-c subset of the training set
    Xcc = cell2table(Xc);
    pc = (size(Xc,1) + laplace) / (size(X,1) + laplace * size(C,1));  % class prior
    % -------- parent-attribute probabilities P(c,xi), eq. (7.24) --------
    p_cxi = zeros(1,size(x,2));  % results stored in the row vector p_cxi
    for ii = 1:size(x,2)
        if ~strcmp(class(x{1,ii}),class(X{1,ii}))
            fprintf('Feature %d of the query has a different type than the training data; cannot predict.\n', ii);
            return
        end
        if isa(x{1,ii},'categorical')
            % discrete (categorical) attribute: Laplace-corrected counting
            Xci = 0;
            for jj = 1:size(Xc,1)
                if Xc{jj,ii}==x{1,ii}
                    Xci = Xci+1;
                end
            end
            p_cxi(ii) = (Xci + laplace) / (size(XX,1) + laplace * size(C,1) * N(ii));
        elseif isa(x{1,ii}, 'double')
            % continuous attribute: P(c,xi) = P(c) * N(xi; u, sigma)
            Xci = cell2mat(Xc(:,ii));
            u = mean(Xci);
            sigma = std(Xci);
            p_cxi(ii) = pc / (sqrt(2*pi) * sigma) * exp(-(x{1,ii} - u)^2 / (2 * sigma^2));
        else
            disp('Only categorical and double features are handled; other types would need extra code.')
            return
        end
    end
    % -------- conditional probabilities P(xj|c,xi), eq. (7.25) --------
    p_cxixj = eye(size(x,2));  % results stored in the matrix p_cxixj (diagonal left at 1)
    for ii = 1:size(x,2)
        for jj = 1:size(x,2)
            if ii == jj
                continue
            end
            % ------ four cases, depending on whether xi and xj are discrete or continuous ------
            if isa(x{1,ii},'categorical') && isa(x{1,jj}, 'categorical')
                % both discrete: Laplace-corrected counting, eq. (7.25)
                % (fixed: the original compared the two isa() results with ==,
                %  which also fired when both attributes were continuous)
                Xci = [];
                for jjj = 1:size(Xc,1)
                    if Xc{jjj,ii}==x{1,ii}
                        Xci = [Xci,jjj];       % indices of D_{c,xi}
                    end
                end
                Xcij = 0;
                for jjj = 1:size(Xci,2)
                    if Xc{Xci(jjj), jj} == x{1, jj}
                        Xcij = Xcij + 1;       % |D_{c,xi,xj}|
                    end
                end
                p_cxixj(ii, jj) = (Xcij + laplace) / (size(Xci,2) + laplace * N(jj));
            end
            if isa(x{1,ii},'categorical') && isa(x{1,jj}, 'double')
                % xi discrete, xj continuous: fit a Gaussian on D_{c,xi}
                Xci = 0;
                Xcij = [];
                for jjj = 1:size(Xc,1)
                    if Xc{jjj,ii}==x{1,ii}
                        Xci = Xci+1;
                        Xcij = [Xcij;Xcc(jjj,:)];
                    end
                end
                % If the subset D_{c,xi} has fewer than 2 samples, the normal
                % parameters cannot be estimated; fall back as described above.
                if size(Xcij,1) == 0
                    u = 0;
                else
                    u = mean(table2array(Xcij(:,jj)));
                end
                if size(Xcij,1) < 2
                    sigma = 1;
                else
                    sigma = std(table2array(Xcij(:,jj)));
                end
                p_cxixj(ii, jj) = 1/(sqrt(2*pi)*sigma) * exp(-(x{1,jj} - u)^2 / (2 * sigma^2));
            end
            if isa(x{1,jj},'categorical') && isa(x{1,ii}, 'double')
                % xi continuous, xj discrete: Bayes inversion,
                % P(xj|c,xi) = N(xi; u, sigma fitted on D_{c,xj}) * P(c,xj) / P(c,xi)
                Xci = 0;
                Xcij = [];
                for jjj = 1:size(Xc,1)
                    if Xc{jjj,jj}==x{1,jj}
                        Xci = Xci+1;
                        Xcij = [Xcij;Xcc(jjj,:)];
                    end
                end
                % Same small-sample fallback as above.
                if size(Xcij,1) == 0
                    u = 0;
                else
                    u = mean(table2array(Xcij(:,ii)));
                end
                if size(Xcij,1) < 2
                    sigma = 1;
                else
                    sigma = std(table2array(Xcij(:,ii)));
                end
                p_cxixj(ii, jj) = 1/(sqrt(2*pi)*sigma) * exp(-(x{1,ii} - u)^2 / (2 * sigma^2)) * p_cxi(jj) / p_cxi(ii);
            end
            if isa(x{1,ii},'double') && isa(x{1,jj}, 'double')
                % both continuous: bivariate Gaussian fitted on the class-c samples,
                % P(xj|c,xi) = N2([xi,xj]; mu, Sigma) * P(c) / P(c,xi)
                Xcij = table2array(Xcc(:,[ii,jj]));
                mui = mean(Xcij);
                sigmaxie = cov(Xcij);
                p_cxixj(ii,jj) = 1/(2*pi*sqrt(det(sigmaxie))) * ...
                    exp(-0.5*([x{1,ii},x{1,jj}]-mui)/sigmaxie*([x{1,ii},x{1,jj}]-mui)') * pc/p_cxi(ii);
            end
        end

    end
    % -------- sum over parent attributes with at least mmin samples, eq. (7.23) --------
    sump = 0;
    for iij = 1:size(x,2)
        Xci = 0;                        % |D_{xi}|: training samples sharing this parent value
        for jjj = 1:size(X,1)
            if X{jjj,iij}==x{1,iij}     % (fixed: the original hard-coded column 2 here)
                Xci = Xci+1;
            end
        end
        if Xci >= mmin
            sump = sump + p_cxi(iij)*prod(p_cxixj(iij,:));
        end
    end
    p(i) = sump;
end
[~,index] = max(p);
predict = C(index);
disp(p)  % unnormalized score of each class
end
%  ====================================
%               Main script (a separate file from aoop.m)
%  Watermelon dataset 3.0 (table 4.3 in the textbook)
%  FeatureName = {'色泽','根蒂','敲声','纹理','脐部','触感','密度','含糖率'}
%                (color, root, knock sound, texture, navel, touch, density, sugar content)
%  LabelName   = {1: '好瓜' (good melon), 0: '坏瓜' (bad melon)}
clear; clc;
load m1.mat     % provides the table xigua1 (training data)
load m2.mat     % provides the table XTT1 (test data)

X = table2cell(xigua1);
% x = table2cell(XTT1);   % alternatively, take the query from the test table
x = X(17,:);              % start from training sample 17 ...
x{1,8} = 0.8;             % ... and override its sugar content
Y = [ones(8,1); -1*ones(9,1)];
% x = {'青绿', '蜷缩', '浊响', '清晰', '凹陷', '硬滑', 0.697, 0.460};  % the "Test 1" sample
predict_label = aoop(x, X, Y)  % set laplace = 0 inside aoop to compare with the textbook result

LLE

function [Y] = lle(X,K,d)
% Locally linear embedding (Roweis & Saul)
%   X : N-by-D data matrix
%   K : number of nearest neighbors
%   d : target embedding dimension
X = X';
[D,N] = size(X);

% Pairwise squared distances and the K nearest neighbors of each point
X2 = sum(X.^2,1);
distance = repmat(X2,N,1)+repmat(X2',1,N)-2*X'*X;
[~,index] = sort(distance);
neighborhood = index(2:(1+K),:);

% Regularize the local Gram matrix when K > D (it is singular otherwise)
if(K>D)
  tol=1e-3;
else
  tol=0;
end

% Solve for the reconstruction weights of each point
W = zeros(K,N);
for ii=1:N
   z = X(:,neighborhood(:,ii))-repmat(X(:,ii),1,K);  % center the neighbors on the point
   C = z'*z;                                         % local Gram matrix
   C = C + eye(K,K)*tol*trace(C);                    % regularization
   W(:,ii) = C\ones(K,1);                            % solve C*w = 1
   W(:,ii) = W(:,ii)/sum(W(:,ii));                   % enforce sum(w) = 1
end

% Build the sparse cost matrix M = (I-W)'*(I-W)
M = sparse(1:N,1:N,ones(1,N),N,N,4*K*N);
for ii=1:N
   w = W(:,ii);
   jj = neighborhood(:,ii);
   M(ii,jj) = M(ii,jj) - w';
   M(jj,ii) = M(jj,ii) - w;
   M(jj,jj) = M(jj,jj) + w*w';
end

% Embedding: eigenvectors of M with the smallest eigenvalues
options.disp = 0;
options.isreal = 1;
options.issym = 1;
options.v0=ones(N,1);
[Y,~] = eigs(M,d+1,0,options);
Y = Y(:,2:d+1)'*sqrt(N);  % discard the bottom (constant) eigenvector
                          % (the original kept columns 1:d, which includes it)
Y = Y';
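A usage sketch on synthetic data (the Swiss-roll construction is illustrative, not part of the original function):

% Embed a noisy 3-D Swiss roll into 2-D with lle() above.
n = 1000;
t = 3*pi/2 * (1 + 2*rand(n,1));                     % roll parameter
X = [t.*cos(t), 20*rand(n,1), t.*sin(t)] + 0.1*randn(n,3);
Y = lle(X, 12, 2);                                  % K = 12 neighbors, d = 2
scatter(Y(:,1), Y(:,2), 10, t, 'filled');           % color by position along the roll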



Spectral clustering

function C = spectral(dataSet,sigma, num_clusters)
% Spectral clustering with the normalized Laplacian
% Inputs : dataSet        : N-by-D data matrix (the similarity matrix is built internally)
%          sigma          : Gaussian-kernel bandwidth; must not be 0
%          num_clusters   : number of clusters
%
% Output : C : N-by-1 vector of cluster labels
%
    dataSet = dataSet/max(max(abs(dataSet)));   % scale the data into [-1,1]

    Z = pdist(dataSet);
    W = squareform(Z);                          % pairwise Euclidean distances
    m = size(W, 1);

    % Similarity matrix: in practice a Gaussian kernel of the distances
    W = W.*W;                                   % squared distances
    W = -W/(2*sigma*sigma);
    S = full(spfun(@exp, W));   % S is the similarity (affinity) matrix; from here on
                                % we work with similarities rather than an adjacency matrix

    % Degree matrix D: column sums of S placed on the diagonal
    D = full(sparse(1:m, 1:m, sum(S)));

    % Normalized Laplacian, L = I - D^(-1/2) * S * D^(-1/2)
    L = eye(m)-(D^(-1/2) * S * D^(-1/2));

    % Eigenvectors V for the smallest-magnitude eigenvalues ('SM')
    [V, ~] = eigs(L, num_clusters, 'SM');

    % k-means on the rows of the eigenvector matrix
    C = kmeans(V,num_clusters);
end
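A usage sketch on synthetic data (the two Gaussian blobs are illustrative, not part of the original post):

% Cluster two well-separated Gaussian blobs with spectral() above.
data = [mvnrnd([0 0],0.1*eye(2),100); mvnrnd([2 2],0.1*eye(2),100)];
C = spectral(data, 0.5, 2);     % bandwidth sigma = 0.5, two clusters
gscatter(data(:,1), data(:,2), C);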

Adjacency matrix to graph





import networkx as nx            # import NetworkX, renamed nx for brevity
import matplotlib.pyplot as plt  # import the plotting package matplotlib
import numpy as np

# matrix = [[0 for x in range(8)] for y in range(8)]  # 8x8 zero matrix
matrix = [[0, 1, 1, 1, 0, 0, 1, 0],
          [1, 0, 1, 1, 1, 0, 0, 0],
          [1, 1, 0, 0, 1, 1, 1, 0],
          [1, 1, 0, 0, 1, 0, 1, 0],
          [0, 1, 1, 1, 0, 1, 1, 1],
          [0, 0, 1, 0, 1, 0, 0, 1],
          [1, 0, 1, 1, 1, 0, 0, 1],
          [0, 0, 0, 0, 1, 1, 1, 0]]
colors = [1, 2, 3, 3, 1, 2, 2, 3]
# matrix = np.array(matrix)
# Build the graph
G = nx.Graph()                   # an empty undirected graph
v = range(len(matrix))           # node labels 0..7 (the original used range(1, 8),
                                 # which mismatches the 0-based edge indices below)
G.add_nodes_from(v)              # add the nodes
for x in range(len(matrix)):     # add the edges
    for y in range(len(matrix)):
        if matrix[x][y] == 1:
            G.add_edge(x, y)     # duplicates from the symmetric matrix are ignored

# Draw G with labels, coloring the nodes with the given colors
nx.draw(G, with_labels=True, node_color=colors)
plt.show()

Using hidden Markov models

from random import randint
import numpy as np
from hmmlearn import hmm


def hmm_forward(A, B, pi, O, V):
    """Forward algorithm: P(O | A, B, pi) and the alpha table."""
    T = len(O)
    N = len(A[0])
    # step 1: initialization
    alpha = [[0] * T for _ in range(N)]
    for i in range(N):
        alpha[i][0] = pi[i] * B[i][V.index(O[0])]

    # step 2: recursion over alpha(t)
    for t in range(1, T):
        for i in range(N):
            temp = 0
            for j in range(N):
                temp += alpha[j][t - 1] * A[j][i]
            alpha[i][t] = temp * B[i][V.index(O[t])]

    # step 3: termination, sum the final alphas
    proba = 0
    for i in range(N):
        proba += alpha[i][-1]
    return proba, alpha
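In symbols, hmm_forward implements the standard forward recursion:

$$\alpha_i(1) = \pi_i\, b_i(o_1), \qquad \alpha_i(t) = \Big[\sum_{j=1}^{N} \alpha_j(t-1)\, a_{ji}\Big]\, b_i(o_t), \qquad P(O \mid \lambda) = \sum_{i=1}^{N} \alpha_i(T)$$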


def hmm_backward(A, B, pi, O, V):
    """Backward algorithm: P(O | A, B, pi) and the beta table."""
    T = len(O)
    N = len(A[0])
    # step 1: initialization
    beta = [[0] * T for _ in range(N)]
    for i in range(N):
        beta[i][-1] = 1

    # step 2: backward recursion over beta(t)
    for t in reversed(range(T - 1)):
        for i in range(N):
            for j in range(N):
                beta[i][t] += A[i][j] * B[j][V.index(O[t + 1])] * beta[j][t + 1]

    # step 3: termination
    proba = 0
    for i in range(N):
        proba += pi[i] * B[i][V.index(O[0])] * beta[i][0]
    return proba, beta
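Likewise, hmm_backward implements the matching backward recursion:

$$\beta_i(T) = 1, \qquad \beta_i(t) = \sum_{j=1}^{N} a_{ij}\, b_j(o_{t+1})\, \beta_j(t+1), \qquad P(O \mid \lambda) = \sum_{i=1}^{N} \pi_i\, b_i(o_1)\, \beta_i(1)$$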


'''def TODO_EM(Q, V, A, B, pi, v):
    # Unfinished E-step sketch, kept commented out
    T = len(O)
    N = len(A[0])
    # step 1: run the forward and backward passes
    _, alpha2 = hmm_forward(A, B, pi, O, v)
    _, beta = hmm_backward(A, B, pi, O, v)
    alpha2 = np.array(alpha2)
    beta = np.array(beta)

    EY = 0
    for j in range(N):
        EY = EY + np.vdot(alpha2[j], beta[j])
    print(EY)

    return 0
'''


def hmm_obs_seq(Q, V, A, B, pi, T):
    """
    :param Q: set of states
    :param V: set of observations
    :param A: state-transition probability matrix
    :param B: observation (emission) probability matrix
    :param pi: initial state distribution
    :param T: length of the state/observation sequence
    """
    q_t = np.random.choice(Q, size=1, p=pi)
    o_0 = list(np.random.choice(V, size=1, p=B[Q.index(q_t)]))
    o_t = []
    o_t.extend(o_0)
    for t in range(1, T):
        # advance the chain from the current state (the original kept sampling
        # from the initial state q_0, so the chain never actually moved)
        q_t = np.random.choice(Q, size=1, p=A[Q.index(q_t)])
        o_t.extend(np.random.choice(V, size=1, p=B[Q.index(q_t)]))
    for i in range(T):
        o_t[i] = [V.index(o_t[i])]
    return o_t


# Example from Li Hang's "Statistical Learning Methods"
Q = [1, 2, 3]
V = ['红', '白']  # observations: red / white
A = np.array([[0.5, 0.2, 0.3], [0.3, 0.5, 0.2], [0.2, 0.3, 0.5]])
B = np.array([[0.5, 0.5], [0.4, 0.6], [0.7, 0.3]])
pi = [0.2, 0.4, 0.4]
'''T = 8
O = hmm_obs_seq(Q, V, A, B, pi, T)
print(O)
proba, alpha = hmm_forward(A, B, pi, O, V)
print(alpha)
TODO_EM(Q, V, A, B, pi, V)'''
X2 = np.array([[0], [1], [0]])   # tiny toy dataset: one observation sequence of length 3
data_size = 100
whole_data = []
lengths = []
n_observations = len(V)
for i in range(data_size):
    sequence_length = randint(3, 10)
    data = hmm_obs_seq(Q, V, A, B, pi, sequence_length)
    whole_data.append(data)
    lengths.append(sequence_length)
whole_data = np.concatenate(whole_data)
lengths = [3]   # overrides the generated lengths: the commented-out fit below
                # trains on the toy sequence X2 (a single sequence of length 3)
hmm_model = hmm.MultinomialHMM(n_components=len(Q),
                               n_iter=1000000,   # maximum number of EM iterations
                               tol=0.00000001,   # convergence threshold: EM stops once the
                                                 # log-likelihood gain drops below it
                               verbose=False,    # if True, a convergence report is printed
                                                 # to sys.stderr on every iteration
                               )
'''hmm_model.fit(X2, lengths)

# After fitting, inspect the learned parameters
print("finished learning from", str(data_size), "sequences")
# Unsupervised learning cannot pin down the ordering of the hidden states,
# but the three parameter sets below are mutually consistent.
print('initial state distribution')
print(hmm_model.startprob_, '\n')
print('state-transition matrix')
print(hmm_model.transmat_, '\n')
print('emission matrix (hidden state -> observation)')
print(hmm_model.emissionprob_, '\n')'''
hmm_model.startprob_ = pi
hmm_model.transmat_ = A
hmm_model.emissionprob_ = B
seen = np.array([[0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1]]).T
logprob, box = hmm_model.decode(seen, algorithm="viterbi")
print(logprob)
print(box + 1)   # most likely state sequence (1-based, to match the book)
box = hmm_model.predict(seen)
print("predicted state sequence:")
print(box + 1)
'''print("The ball picked:", ", ".join(map(lambda x: observations[x], seen)))
print("The hidden box", ", ".join(map(lambda x: states[x], box)))'''

Markov clustering

function [C,p] = Markovclustering(data,K,mode)
% mode selects the dimensionality reduction applied before clustering:
%   'PCA' (default) or 'LLE'
if nargin<3
    mode = 1;
else
    switch mode
        case 'PCA'
            mode = 1;
        case 'LLE'
            mode = 2;
        otherwise
            mode = 1;   % fall back to PCA (the original left mode as a string
                        % here, which made the comparison below error out)
    end
end
if mode == 1
    if size(data,2) > 2
        [~, score] = pca(data);
        p = score(:,1:2);
    else
        p = data;
    end
else
    if size(data,2) > 2
        score = lle(data,12,2);
        p = score;
    else
        p = data;
    end
end

W = zeros(length(p));
for i = 1:length(p)
    for j = 1:length(p)
        W(i,j) = sqrt(sum((p(i,:)-p(j,:)).^2));  % initialize the edge weights from distances
    end
end
preW = W;
while 1
    x = repmat(sum(W),length(p),1);
    W = W./x;                           % column-normalize into a stochastic matrix
    W = W*W;                            % Markov transition (expansion step)
    if sum(sum(abs(preW-W))) < 1e-15    % converged? (abs added: the signed sum
                                        %  could cancel to zero prematurely)
        break;
    end
    preW = W;
end
[C,~] = kmeans(W(:,1),K);               % cluster on the first column of the stationary W
end
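A usage sketch on synthetic data (the three Gaussian blobs are illustrative, not part of the original post):

% Three 3-D Gaussian blobs, reduced to 2-D by PCA, clustered into K = 3 groups.
data = [mvnrnd([0 0 0],eye(3),50); mvnrnd([5 5 5],eye(3),50); mvnrnd([0 5 0],eye(3),50)];
[C, p] = Markovclustering(data, 3, 'PCA');
gscatter(p(:,1), p(:,2), C);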

An extremely simple gradient descent
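The script below fits a no-intercept line $y = kx$ to noisy data by stochastic gradient descent on the squared error $L(k) = \tfrac{1}{2}\sum_i (k x_i - y_i)^2$, updating once per data point:

$$k \leftarrow k - \alpha\,(k x_i - y_i)\,x_i$$

with learning rate $\alpha = 0.0001$ (arf in the code).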

x = 0:0.1:10;
y = 4*x + 8*rand(1,101) - 8*rand(1,101);   % line of slope 4 plus noise
plot(x,y,'.r');
arf = 0.0001;    % learning rate
k0 = 1;          % initial slope estimate

for j = 1:100                              % 100 passes over the data
for i = 1:101
    k0 = k0 - arf*((k0*x(i)-y(i))*x(i));   % SGD step on the squared error
end
end

y2 = k0*x;
hold on
plot(x,y2,'.b');                           % fitted line