EM 算法

原理参考:
EM算法及python实现

代码传送门:https://github.com/taifyang/machine-learning

python实现:

import math
import copy
import numpy as np

class EM:
    """Two-component 1-D Gaussian mixture fitted with the EM algorithm.

    Both components share the same known standard deviation ``sigma``;
    only the two means mu[0], mu[1] are estimated.
    """

    def __init__(self, x, sigma, k, n):
        """
        x     : (1, n) array of observations
        sigma : shared standard deviation of the k Gaussians
        k     : number of mixture components (the code assumes k == 2)
        n     : number of samples
        """
        self.x = x
        self.sigma = sigma
        self.k = k
        self.N = n
        self.mu = np.random.random(2)          # random initial means
        self.expectations = np.zeros((n, k))   # responsibilities E[z_ij]
        print(self.mu, self.expectations)

    # EM step 1: compute the responsibilities E[z_ij]
    def e_step(self):
        """E-step: E[z_ij] = p(component j | x_i), i.e. how much of sample i
        each Gaussian is responsible for (rows sum to 1)."""
        for i in range(0, self.N):
            denom = 0.0
            for j in range(0, self.k):
                # un-normalized Gaussian density of x_i under component j
                denom += math.exp((-1 / (2 * (float(self.sigma ** 2)))) * (float(self.x[0, i] - self.mu[j])) ** 2)
            # BUG FIX: the original iterated over the *global* name `k` here,
            # which only worked because the demo script defined one at module
            # level; it must be self.k.
            for j in range(0, self.k):
                numer = math.exp((-1 / (2 * (float(self.sigma ** 2)))) * (float(self.x[0, i] - self.mu[j])) ** 2)
                self.expectations[i, j] = numer / denom

    # EM step 2: re-estimate the means mu that maximize the expected likelihood
    def m_step(self):
        """M-step: each mean becomes the responsibility-weighted average of
        the samples assigned to that component."""
        for j in range(0, self.k):
            numer = 0.0  # sum of E[z_ij] * x_i
            denom = 0.0  # sum of E[z_ij]
            for i in range(0, self.N):
                numer += self.expectations[i, j] * self.x[0, i]
                denom += self.expectations[i, j]
            # FIX: assign once after the sums are complete; the original
            # recomputed mu[j] on every inner iteration (same final result,
            # O(N) wasted divisions per component).
            self.mu[j] = numer / denom

    # Iterate at most iter_num times, or stop when precision epsilon is reached
    def predict(self, iter_num, epsilon):
        """Run EM until the total absolute change of the means in one
        iteration drops below epsilon, or iter_num iterations elapse."""
        print(self.x)
        print(u"初始<u1,u2>:", self.mu)   # initial means
        for i in range(iter_num):
            old_mu = copy.deepcopy(self.mu)  # means before this iteration
            self.e_step()
            self.m_step()
            print(i, self.mu)  # means after this EM iteration
            if sum(abs(self.mu - old_mu)) < epsilon:
                break

if __name__ == '__main__':
    sigma = 6    # shared standard deviation of both Gaussians
    mu1 = 40     # mean of the first component
    mu2 = 20     # mean of the second component
    n = 100      # number of samples
    k = 2        # number of mixture components

    # Generate the data set: each sample is drawn with equal probability
    # from N(mu1, sigma) or N(mu2, sigma).
    x = np.zeros((1, n))
    for i in range(n):
        center = mu1 if np.random.random(1) > 0.5 else mu2
        x[0, i] = np.random.normal() * sigma + center

    em = EM(x, sigma, k, n)
    em.predict(iter_num=100, epsilon=0.001)

python调包:

from sklearn.datasets import make_blobs
from sklearn.mixture import GaussianMixture

# Synthetic 1-D data: 100 points around two centers (40 and 20), std 6.
x, y = make_blobs(n_samples=100, n_features=1, centers=[[40], [20]], cluster_std=6)

# Fit a 2-component Gaussian mixture with EM (at most 100 iterations).
gmm = GaussianMixture(n_components=2, max_iter=100)
gmm.fit(x)
print(gmm.means_)

C++实现:

#include <cmath>
#include <cstdlib>
#include <ctime>
#include <iostream>
#include <random>
#include <vector>

// Two-component 1-D Gaussian mixture fitted with the EM algorithm.
// Both components share the known standard deviation m_sigma; only the
// means m_mu[0] and m_mu[1] are estimated.
class EM
{
public:
	// x: 1 x n matrix of observations; sigma: shared standard deviation;
	// k: number of components (the code assumes k == 2); n: sample count.
	// NOTE: initial means come from rand(), so call srand() first (main does).
	EM(std::vector<std::vector<float>> x, float sigma, int k, int n)
	{
		m_x = x;
		m_sigma = sigma;
		m_k = k;
		m_n = n;

		// Random initial means in [0, 2).
		m_mu.resize(2);
		for (size_t i = 0; i < m_mu.size(); i++)
		{
			m_mu[i] = 2 * rand() / double(RAND_MAX);
		}

		// n x k matrix of responsibilities E[z_ij], zero-initialized.
		// FIX: assign() replaces the original resize + nested zeroing loops.
		m_expectations.assign(n, std::vector<float>(k, 0.0f));
	}

	// E-step: E[z_ij] = p(component j | x_i); each row sums to 1.
	void e_step()
	{
		for (int i = 0; i < m_n; i++)
		{
			float denom = 0;
			for (int j = 0; j < m_k; j++)
			{
				// un-normalized Gaussian density of x_i under component j
				denom += exp((-1 / (2 * pow(m_sigma, 2))) * pow(m_x[0][i] - m_mu[j], 2));
			}
			for (int j = 0; j < m_k; j++)
			{
				float numer = exp((-1 / (2 * pow(m_sigma, 2))) * pow(m_x[0][i] - m_mu[j], 2));
				m_expectations[i][j] = numer / denom;
			}
		}
	}

	// M-step: each mean becomes the responsibility-weighted sample average.
	void m_step()
	{
		for (int j = 0; j < m_k; j++)
		{
			float numer = 0;  // sum of E[z_ij] * x_i
			float denom = 0;  // sum of E[z_ij]
			for (int i = 0; i < m_n; i++)
			{
				numer += m_expectations[i][j] * m_x[0][i];
				denom += m_expectations[i][j];
			}
			// FIX: assign once after the sums are complete; the original
			// recomputed m_mu[j] on every inner iteration (same final value,
			// one wasted division per sample).
			m_mu[j] = numer / denom;
		}
	}

	// Run EM up to iter_num iterations, stopping early once the total
	// absolute change of the means falls below epsilon.
	void predict(int iter_num, float epsilon)
	{
		// FIX: loop indices are int to match iter_num/m_mu sizes without
		// signed/unsigned comparison warnings.
		for (int i = 0; i < iter_num; i++)
		{
			std::vector<float> old_mu = m_mu;  // means before this iteration
			e_step();
			m_step();
			for (auto j : m_mu) std::cout << j << " "; std::cout << std::endl;

			float error = 0;
			for (size_t j = 0; j < m_mu.size(); j++)
			{
				error += fabs(m_mu[j] - old_mu[j]);
			}
			if (error < epsilon)
				break;
		}
	}

private:
	std::vector<std::vector<float>> m_x;             // 1 x n observations
	float m_sigma;                                   // shared std deviation
	int m_k;                                         // component count
	int m_n;                                         // sample count
	std::vector<float> m_mu;                         // estimated means
	std::vector<std::vector<float>> m_expectations;  // n x k responsibilities
};

int main(int argc, char* argv[])
{
	float sigma = 6;  // shared standard deviation of both Gaussians
	float mu1 = 40;   // mean of the first component
	float mu2 = 20;   // mean of the second component
	int n = 100;      // number of samples
	int k = 2;        // number of mixture components

	std::vector<std::vector<float>> x(1, std::vector<float>(n));
	srand((unsigned)time(NULL));
	// BUG FIX: the original left default_random_engine unseeded, so the
	// normal samples were identical on every run even though srand() was
	// called — clearly contrary to the intent of seeding with time().
	std::default_random_engine generator((unsigned)time(NULL));
	// Each sample is drawn with equal probability from N(mu1, sigma)
	// or N(mu2, sigma).
	std::normal_distribution<double> dist1(mu1, sigma);
	std::normal_distribution<double> dist2(mu2, sigma);
	for (int i = 0; i < n; i++)
	{
		if (rand() / double(RAND_MAX) > 0.5)
		{
			x[0][i] = dist1(generator);
		}
		else {
			x[0][i] = dist2(generator);
		}
		//std::cout << x[0][i] << std::endl;
	}

	EM em = EM(x, sigma, k, n);
	em.predict(100, 0.001);

	system("pause");  // NOTE: Windows-only; a no-op error on other platforms
	return EXIT_SUCCESS;
}
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包

打赏作者

给算法爸爸上香

你的鼓励将是我创作的最大动力

¥1 ¥2 ¥4 ¥6 ¥10 ¥20
扫码支付:¥1
获取中
扫码支付

您的余额不足,请更换扫码支付或充值

打赏作者

实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值