PCA Dimensionality Reduction
Reducing Two-Dimensional Data
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
rng=np.random.RandomState(8)
data=np.dot(rng.rand(2,2),rng.randn(2,200)).T
df=pd.DataFrame({'x1':data[:,0] ,'x2':data[:,1]})
print(df.head())
print(df.shape)
plt.scatter(df['x1'],df['x2'],alpha=0.8,marker='.')
plt.axis('equal')
plt.grid()
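
By construction the two features are linearly correlated (independent Gaussian noise mixed through a random 2x2 matrix), which is exactly the situation PCA is meant to exploit. As a quick check, a minimal sketch reusing the df and data objects defined above:
print(df.corr())        # off-diagonal entries far from 0 confirm x1 and x2 are correlated
print(np.cov(data.T))   # 2x2 covariance matrix of the sample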

from sklearn.decomposition import PCA
pca=PCA(n_components=1)
pca.fit(df)
print('Variance of the retained principal component:',pca.explained_variance_)
print('Share of total variance explained by the principal component:',pca.explained_variance_ratio_)
print('Principal axis (direction of maximum variance):',pca.components_)
print('Number of retained principal components:',pca.n_components_)
x_pca=pca.transform(df)
x_new=pca.inverse_transform(x_pca)
print('original shape:',df.shape)
print('transformed shape:',x_pca.shape)
print(x_pca[:5])
plt.scatter(df['x1'],df['x2'],alpha=0.6,marker='.')
plt.scatter(x_new[:,0],x_new[:,1],alpha=0.9,marker='.',color='r')
plt.axis('equal')
plt.grid()
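
PCA.transform simply centres the data with pca.mean_ and projects it onto pca.components_, and inverse_transform maps the 1-D scores back into the original plane (the red points above). A minimal sketch, reusing the fitted pca and df from this section, that reproduces the projection by hand:
manual=np.dot(df.values-pca.mean_,pca.components_.T)   # centre, then project onto the principal axis
print(np.allclose(manual,x_pca))                       # True: identical to pca.transform(df)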

Reducing Multi-Dimensional Data
from sklearn.datasets import load_digits
digits=load_digits()
print(digits.keys())
print('Number of samples: %i' % len(digits['data']))
print('数据形状为:', digits.data.shape)
print(digits.data[:2])
pca=PCA(n_components=10)
projected=pca.fit_transform(digits.data)   # fit and project in one step
print(pca.explained_variance_)
print(pca.explained_variance_ratio_)
print('original shape:',digits.data.shape)
print('transformed shape:',projected.shape)
s=pca.explained_variance_
# cumulative share of variance among the 10 retained components (not of the full 64-dimensional total)
c_s=pd.DataFrame({'b':s,'b_sum':s.cumsum()/s.sum()})
c_s['b_sum'].plot(style='--ko',figsize=(10,4))
plt.axhline(0.85,color='r',linestyle='--',alpha=0.8)   # reference line at 85%
plt.text(6,c_s['b_sum'].iloc[6]-0.08,'cumulative contribution exceeds 85% at the 7th component',color='b')
plt.grid()
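
Rather than fixing n_components=10 and reading the cumulative curve by eye, scikit-learn's PCA also accepts a float between 0 and 1 for n_components and then keeps just enough components to explain that fraction of the total variance. A minimal sketch on the same digits data (variable names are illustrative; note the threshold here is measured against the full 64-dimensional variance, not just the 10 components retained above):
pca_auto=PCA(n_components=0.85)                  # keep enough components for 85% of total variance
projected_auto=pca_auto.fit_transform(digits.data)
print('components kept:',pca_auto.n_components_)
print('total variance explained:',pca_auto.explained_variance_ratio_.sum())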
