Linear Regression
"""
A simple linear regression example
"""
from sklearn import linear_model
X = [[0, 0], [1, 1], [2, 2]]
y = [0, 1, 2]
reg = linear_model.LinearRegression()
reg.fit(X, y)  # train the model: X is the training data, y the target values
z = reg.coef_  # coefficients (weights) of the fitted linear function
k = reg.predict([[3, 3]])  # predict the target value for the new point [3, 3]
print(z, k)
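Note that the two features in X are identical, so the least-squares problem is underdetermined; LinearRegression falls back to the minimum-norm solution and splits the slope evenly between the features. A quick self-contained check of the fitted attributes (a minimal sketch of the example above):

from sklearn import linear_model

reg = linear_model.LinearRegression().fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2])
print(reg.coef_)              # [0.5 0.5] -- the slope is split evenly across the duplicated features
print(reg.intercept_)         # ~0.0
print(reg.predict([[3, 3]]))  # [3.] -- the linear model extrapolates beyond the training range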
Regression Tree
"""
A regression tree example
"""
from sklearn import tree
X = [[0, 0], [1, 1], [2, 2]]
y = [0, 1, 2]
dt_reg_test = tree.DecisionTreeRegressor()
dt_reg_test = dt_reg_test.fit(X, y)  # train the model
pred = dt_reg_test.predict([[3, 3]])  # predict the target value for [3, 3]
print(pred)
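Unlike the linear model, the tree predicts 2.0 for [3, 3] rather than 3.0: a decision tree can only return values stored in its leaves, so it cannot extrapolate beyond the range of the training targets. A minimal sketch comparing the two models on the same data:

from sklearn import linear_model, tree

X = [[0, 0], [1, 1], [2, 2]]
y = [0, 1, 2]
lin = linear_model.LinearRegression().fit(X, y)
dt = tree.DecisionTreeRegressor().fit(X, y)
print(lin.predict([[3, 3]]))  # [3.] -- extrapolates along the fitted line
print(dt.predict([[3, 3]]))   # [2.] -- clamped to the leaf of the nearest training point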
K-Nearest Neighbors
"""
K-nearest neighbors regression
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn import neighbors
# Generate sample data along a sine curve
np.random.seed(0)
X = np.sort(5 * np.random.rand(40, 1), axis=0)  # 40 random points in [0, 5), sorted
T = np.linspace(0, 5, 500)[:, np.newaxis]       # dense grid for plotting predictions
y = np.sin(X).ravel()
# Add random noise to every fifth target value (8 of the 40 points)
y[::5] += 0.5 - np.random.rand(8)
# Fit the model with both weighting schemes and plot the results for comparison
n_neighbors = 5
for i, weights in enumerate(['uniform', 'distance']):
    knn = neighbors.KNeighborsRegressor(n_neighbors, weights=weights)
    y_ = knn.fit(X, y).predict(T)  # fit on the noisy samples, predict on the dense grid
    plt.subplot(2, 1, i + 1)
    plt.scatter(X, y, c='k', label='data')
    plt.plot(T, y_, c='g', label='prediction')
    plt.axis('tight')
    plt.legend()
    plt.title("KNeighborsRegressor (k = %i, weights = '%s')" % (n_neighbors, weights))
plt.tight_layout()  # keep the two subplot titles from overlapping
plt.show()
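The choice k = 5 above is arbitrary. In practice, n_neighbors and weights are usually tuned by cross-validation; below is a minimal sketch using GridSearchCV on the same synthetic data (the parameter grid is an illustrative assumption, not a recommendation):

import numpy as np
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsRegressor

np.random.seed(0)
X = np.sort(5 * np.random.rand(40, 1), axis=0)
y = np.sin(X).ravel()
y[::5] += 0.5 - np.random.rand(8)

param_grid = {'n_neighbors': list(range(1, 11)), 'weights': ['uniform', 'distance']}
grid = GridSearchCV(KNeighborsRegressor(), param_grid, cv=5)  # 5-fold cross-validation
grid.fit(X, y)
print(grid.best_params_)  # the best (k, weights) combination found on this data
print(grid.best_score_)   # its mean cross-validated R^2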