# First, randomly generate some two-dimensional data as the training set;
# two-dimensional features are chosen mainly to make visualization easy
import numpy as np
import matplotlib.pyplot as plt
#%matplotlib inline
from sklearn.datasets import make_blobs  # sklearn.datasets.samples_generator was removed in newer sklearn
# X holds the sample features and y the cluster labels: 1000 samples, 2 features each,
# 4 clusters centered at [-1,-1], [0,0], [1,1], [2,2], with std devs [0.4, 0.2, 0.1, 0.2]
X, y = make_blobs(n_samples=1000, centers=[[-1,-1], [0,0], [1,1], [2,2]],
                  cluster_std=[0.4, 0.2, 0.1, 0.2], random_state=9)
plt.scatter(X[:, 0], X[:, 1], marker='o')
plt.show()
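# Optional sanity check of the generated data; shapes and counts below are what
# the make_blobs call above should produce:
print(X.shape)         # expected (1000, 2): 1000 samples, 2 features
print(np.bincount(y))  # roughly 250 samples per cluster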
# Now run K-Means clustering on the data, starting with k=2
from sklearn.cluster import KMeans
y_pred = KMeans(n_clusters=2, random_state=9).fit_predict(X)
plt.scatter(X[:, 0], X[:, 1], c=y_pred)
plt.show()
# Next, score the clustering with the Calinski-Harabasz index
from sklearn import metrics
score = metrics.calinski_harabasz_score(X, y_pred)  # the old misspelled calinski_harabaz_score was removed in newer sklearn
print(score)
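# For reference, the Calinski-Harabasz index is the ratio of between-cluster
# dispersion to within-cluster dispersion: CH = [tr(B_k)/(k-1)] / [tr(W_k)/(n-k)].
# Below is a minimal numpy sketch of that computation (not the library's code),
# reusing X and y_pred from above; it should agree with the sklearn score.
n_clusters_found = len(np.unique(y_pred))
overall_mean = X.mean(axis=0)
within = between = 0.0
for label in np.unique(y_pred):
    members = X[y_pred == label]
    center = members.mean(axis=0)
    within += ((members - center) ** 2).sum()                       # tr(W_k)
    between += len(members) * ((center - overall_mean) ** 2).sum()  # tr(B_k)
print((between / (n_clusters_found - 1)) / (within / (len(X) - n_clusters_found)))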
# Now try k=3 and look at the clustering result
y_pred = KMeans(n_clusters=3, random_state=9).fit_predict(X)
plt.scatter(X[:, 0], X[:, 1], c=y_pred)
plt.show()
score = metrics.calinski_harabasz_score(X, y_pred)
print(score)
# And now the clustering result with k=4
y_pred = KMeans(n_clusters=4, random_state=9).fit_predict(X)
plt.scatter(X[:, 0], X[:, 1], c=y_pred)
plt.show()
score = metrics.calinski_harabasz_score(X, y_pred)
print(score)  # the higher the score, the better separated the clusters
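# Since a higher score is better, a simple way to choose k is to sweep candidate
# values and keep the best-scoring one. A minimal sketch (the range 2..6 is an
# arbitrary choice for this example):
best_k, best_score = 2, -1.0
for k in range(2, 7):
    labels = KMeans(n_clusters=k, random_state=9).fit_predict(X)
    s = metrics.calinski_harabasz_score(X, labels)
    print('k=%d, score=%.2f' % (k, s))
    if s > best_score:
        best_k, best_score = k, s
print('best k by CH score:', best_k)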
# Finally, let's see how MiniBatchKMeans does, with batch_size set to 200.
# Since all 4 clusters here are convex, the batch size has little effect on
# the clustering result as long as it is not made extremely small.
from sklearn.cluster import MiniBatchKMeans
for index, k in enumerate((2, 3, 4, 5)):
    plt.subplot(2, 2, index + 1)
    y_pred = MiniBatchKMeans(n_clusters=k, batch_size=200, random_state=9).fit_predict(X)
    score = metrics.calinski_harabasz_score(X, y_pred)
    plt.scatter(X[:, 0], X[:, 1], c=y_pred)
    plt.text(.99, .01, ('k=%d, score: %.2f' % (k, score)),
             transform=plt.gca().transAxes, size=10,
             horizontalalignment='right')
plt.show()
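# To see the trade-off MiniBatchKMeans makes, a rough timing comparison of the
# two estimators at k=4; exact numbers depend on hardware, so this is only a
# sketch. MiniBatchKMeans should be faster at a slightly worse inertia.
import time
for cls in (KMeans, MiniBatchKMeans):
    t0 = time.time()
    model = cls(n_clusters=4, random_state=9).fit(X)
    print('%s: %.3fs, inertia=%.1f' % (cls.__name__, time.time() - t0, model.inertia_))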