A simple Boston house-price prediction written in PyCharm. Because the original Boston dataset was removed from scikit-learn over ethical concerns, this post loads the data from an alternative link. The script also includes a simple iris classification and a handwritten-digits classification:
import sklearn.datasets as sk_datasets
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression
if __name__ == '__main__':
    # KMeans clustering on the iris dataset
    iris = sk_datasets.load_iris()
    iris_data = iris.data
    iris_target = iris.target
    estimator = KMeans(n_clusters=3)
    estimator.fit(iris_data)
    label_pred = estimator.labels_
    # Split the samples by predicted cluster and plot the first two features (sepal length/width)
    x0 = iris_data[label_pred == 0]
    x1 = iris_data[label_pred == 1]
    x2 = iris_data[label_pred == 2]
    plt.scatter(x0[:, 0], x0[:, 1], c="red", marker='o', label='label0')
    plt.scatter(x1[:, 0], x1[:, 1], c="green", marker='*', label='label1')
    plt.scatter(x2[:, 0], x2[:, 1], c="blue", marker='+', label='label2')
    plt.xlabel('sepal length')
    plt.ylabel('sepal width')
    plt.legend(loc=2)
    plt.show()
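    # A quick, optional sanity check on the clustering (assumes sklearn.metrics.silhouette_score):
    # the silhouette score gives a rough measure of cluster separation; closer to 1 is better.
    from sklearn.metrics import silhouette_score
    print('KMeans silhouette score:', silhouette_score(iris_data, label_pred))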
    # PCA: project the iris data down to two components and plot them by true class
    iris_pca = PCA(n_components=2)
    iris_pca = iris_pca.fit(iris_data)
    iris_pca_dr = iris_pca.transform(iris_data)
    colors = ['red', 'black', 'orange']
    plt.figure()
    for i in [0, 1, 2]:
        plt.scatter(iris_pca_dr[iris_target == i, 0],
                    iris_pca_dr[iris_target == i, 1],
                    alpha=1,
                    c=colors[i],
                    label=iris.target_names[i])
    plt.legend()
    plt.title('PCA of IRIS dataset')
    plt.show()
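    # Optionally, check how much of the variance the two retained components capture
    # via PCA's explained_variance_ratio_ attribute.
    print('Explained variance ratio:', iris_pca.explained_variance_ratio_)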
    # KNN classification on iris with a held-out test set
    x_train, x_test, y_train, y_test = train_test_split(iris_data, iris_target, test_size=0.2, random_state=2021)
    knn = KNeighborsClassifier(n_neighbors=3)
    knn = knn.fit(x_train, y_train)
    y_pred = knn.predict(x_test)
    y_true = y_test
    print('Predicted labels:', y_pred)
    print('True labels:', y_true)
    print('KNN accuracy:', knn.score(x_test, y_test))
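    # Optionally, a confusion matrix (sklearn.metrics.confusion_matrix) makes per-class
    # errors easier to read than comparing the raw label arrays above.
    from sklearn.metrics import confusion_matrix
    print('Confusion matrix:\n', confusion_matrix(y_true, y_pred))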
    # Handwritten digits (sklearn's 8x8 load_digits), KNN without PCA
    mnist = sk_datasets.load_digits()
    mnist_data, mnist_target = mnist['data'], mnist['target']
    mnist_x_train, mnist_x_test, mnist_y_train, mnist_y_test = train_test_split(
        mnist_data, mnist_target, test_size=0.2, random_state=2021)
    mnist_knn = KNeighborsClassifier(n_neighbors=3)
    mnist_knn = mnist_knn.fit(mnist_x_train, mnist_y_train)
    mnist_y_pred = mnist_knn.predict(mnist_x_test)
    mnist_y_true = mnist_y_test
    print('Predicted labels:', mnist_y_pred)
    print('True labels:', mnist_y_true)
    correct_rate = np.mean(np.equal(mnist_y_true, mnist_y_pred))
    print('KNN accuracy:', correct_rate)
    # Digits with PCA: keep enough components to explain 95% of the variance, then KNN
    mnist_pca = PCA(n_components=0.95)
    mnist_x_train_pca = mnist_pca.fit_transform(mnist_x_train)
    mnist_x_test_pca = mnist_pca.transform(mnist_x_test)
    mnist_pca_knn = KNeighborsClassifier(n_neighbors=3)
    mnist_pca_knn = mnist_pca_knn.fit(mnist_x_train_pca, mnist_y_train)
    mnist_pca_y_pred = mnist_pca_knn.predict(mnist_x_test_pca)
    pca_correct_rate = np.mean(np.equal(mnist_y_test, mnist_pca_y_pred))
    print('PCA + KNN accuracy:', pca_correct_rate)
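    # Optionally, report how many of the 64 pixel features survive the 95%-variance cut.
    print('Components kept by PCA:', mnist_pca.n_components_)
    print('Cumulative explained variance:', mnist_pca.explained_variance_ratio_.sum())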
    # Boston housing: load the data from the CMU StatLib mirror mentioned above
    data_url = "http://lib.stat.cmu.edu/datasets/boston"
    raw_df = pd.read_csv(data_url, sep=r"\s+", skiprows=22, header=None)
    # Each record spans two physical rows in the file: stitch the 13 features together
    # and take the house price (MEDV) as the target
    boston_data = np.hstack([raw_df.values[::2, :], raw_df.values[1::2, :2]])
    boston_target = raw_df.values[1::2, 2]
    X_train, x_test, y_train, y_true = train_test_split(boston_data, boston_target, test_size=0.2)
    clf = LinearRegression().fit(X_train, y_train)
    boston_y_pred = clf.predict(x_test)
    # Scatter plot of predicted vs. true prices, with the y = x line as a reference
    fig, ax = plt.subplots(figsize=(12, 6))
    ax.scatter(y_true, boston_y_pred, color='blue', alpha=0.6, label='Predicted vs True')
    ax.plot([y_true.min(), y_true.max()], [y_true.min(), y_true.max()],
            color='red', linestyle='--', label='Perfect Prediction')
    ax.set_title('Linear Regression Predicted vs True')
    ax.set_xlabel('True Values')
    ax.set_ylabel('Predicted Values')
    ax.legend()
    plt.show()
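    # Optional numeric summary of the regression fit (assumes sklearn.metrics
    # mean_squared_error and r2_score); the scatter plot above is only a visual check.
    from sklearn.metrics import mean_squared_error, r2_score
    print('Boston MSE:', mean_squared_error(y_true, boston_y_pred))
    print('Boston R^2:', r2_score(y_true, boston_y_pred))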