Linear Regression
This example fits an ordinary least-squares model to a single feature of the diabetes dataset, reports the mean squared error and the coefficient of determination on a held-out test set, and plots the fitted line.
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model
from sklearn.metrics import mean_squared_error, r2_score
# Load the diabetes dataset
diabetes_X, diabetes_y = datasets.load_diabetes(return_X_y=True)
# Use only one feature
diabetes_X = diabetes_X[:, np.newaxis, 2]
# Split the data into training/testing sets
diabetes_X_train = diabetes_X[:-20]  # everything except the last 20 samples
diabetes_X_test = diabetes_X[-20:]   # the last 20 samples
# Split the targets into training/testing sets
diabetes_y_train = diabetes_y[:-20]
diabetes_y_test = diabetes_y[-20:]
# Create a linear regression object
regr = linear_model.LinearRegression()
# Train the model using the training set
regr.fit(diabetes_X_train, diabetes_y_train)
# Make predictions on the testing set
diabetes_y_pred = regr.predict(diabetes_X_test)
# The coefficients
print('Coefficients: \n', regr.coef_)
# The mean squared error
print('Mean squared error: %.2f'
      % mean_squared_error(diabetes_y_test, diabetes_y_pred))  # mean squared error regression loss
# The coefficient of determination: 1 is perfect prediction
print('Coefficient of determination: %.2f'
      % r2_score(diabetes_y_test, diabetes_y_pred))
# Plot outputs
plt.scatter(diabetes_X_test, diabetes_y_test, color='black')
plt.plot(diabetes_X_test, diabetes_y_pred, color='blue', linewidth=3)
plt.xticks(())
plt.yticks(())
plt.show()
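
As a sanity check, the predictions of a fitted LinearRegression are just the learned line applied to the test inputs, and the two reported metrics follow directly from their definitions. The lines below are a minimal sketch (not part of the original script) that reuses the arrays defined above: they recompute the predictions from regr.coef_ and regr.intercept_ and derive MSE and R² with plain NumPy, which should match the sklearn output.
# Sanity check (assumes the script above has already run).
# predict() for a fitted LinearRegression is equivalent to X @ coef_ + intercept_.
manual_pred = diabetes_X_test @ regr.coef_ + regr.intercept_
print('Max difference vs. regr.predict():',
      np.max(np.abs(manual_pred - diabetes_y_pred)))
# MSE: average squared residual.
mse = np.mean((diabetes_y_test - diabetes_y_pred) ** 2)
# R^2: 1 - (residual sum of squares) / (total sum of squares).
ss_res = np.sum((diabetes_y_test - diabetes_y_pred) ** 2)
ss_tot = np.sum((diabetes_y_test - diabetes_y_test.mean()) ** 2)
r2 = 1 - ss_res / ss_tot
print('Manual MSE: %.2f, manual R^2: %.2f' % (mse, r2))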
