1. Regression algorithms - Linear regression analysis
Detailed explanation of linear regression
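In brief: a linear regression model predicts the target as a weighted sum of the features, h(w) = w1*x1 + w2*x2 + ... + wn*xn + b. The weights are found by minimizing the squared-error loss J(w) = Σ(h(x_i) - y_i)², either in closed form with the normal equation w = (XᵀX)⁻¹Xᵀy (what LinearRegression does) or iteratively with gradient descent (what SGDRegressor does). The normal equation is exact but becomes expensive with many features; gradient descent scales better to large datasets.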
2. Linear regression example
# coding: gbk
from sklearn.datasets import fetch_california_housing
from sklearn.linear_model import LinearRegression, SGDRegressor
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler


def mylinear():
    """
    Predict California housing prices directly with linear regression
    :return:
    """
    # Load the dataset
    lb = fetch_california_housing()
    # Split the data into a training set and a test set
    x_train, x_test, y_train, y_test = train_test_split(lb.data, lb.target, test_size=0.25)
    # Standardize (both the features and the target need to be standardized)
    std_x = StandardScaler()
    x_train = std_x.fit_transform(x_train)
    x_test = std_x.transform(x_test)
    # Target values
    std_y = StandardScaler()
    y_train = std_y.fit_transform(y_train.reshape(-1, 1))
    y_test = std_y.transform(y_test.reshape(-1, 1))
    # Estimator prediction
    # Normal-equation solution (LinearRegression)
    lr = LinearRegression()
    lr.fit(x_train, y_train)
    print(lr.coef_)
    # Predict house prices on the test set
    y_predict = std_y.inverse_transform(lr.predict(x_test))
    print("Predicted price of each house in the test set:", y_predict)
    # Gradient-descent solution (SGDRegressor)
    sgd = SGDRegressor()
    sgd.fit(x_train, y_train.ravel())  # SGDRegressor expects a 1-D target
    print(sgd.coef_)
    # predict() returns a 1-D array, so reshape before inverse_transform
    y_predict = std_y.inverse_transform(sgd.predict(x_test).reshape(-1, 1))
    print("SGD predicted price of each house in the test set:", y_predict)
    return None


if __name__ == "__main__":
    mylinear()
3. Regression performance evaluation
Mean squared error (MSE):
API: sklearn.metrics.mean_squared_error
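For m samples, MSE = (1/m) * Σ(y_i - ŷ_i)², i.e. the average of the squared differences between the true and predicted values. A minimal sketch of the API on made-up numbers (the values below are illustration only):

from sklearn.metrics import mean_squared_error

y_true = [3.0, 2.5, 4.0]   # actual target values (illustration only)
y_pred = [2.8, 2.7, 3.6]   # model predictions (illustration only)

# (0.2**2 + 0.2**2 + 0.4**2) / 3 = 0.08
print(mean_squared_error(y_true, y_pred))

In the example above it could be applied as mean_squared_error(std_y.inverse_transform(y_test), y_predict), so that both arguments are back on the original price scale.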
4. Classification algorithms - Logistic regression
1) Saving and loading a model
import joblib  # in older scikit-learn versions: from sklearn.externals import joblib

# Save the trained model
joblib.dump(lr, "B:/PycharmProjects/PythonProject/机器学习/model/test.pkl")
# Load the model
model = joblib.load("B:/PycharmProjects/PythonProject/机器学习/model/test.pkl")
y_predict = std_y.inverse_transform(model.predict(x_test))
5. Classification - Logistic regression example
Detailed explanation of logistic regression
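In brief: logistic regression passes the linear combination z = wᵀx + b through the sigmoid function σ(z) = 1 / (1 + e^(-z)), which turns any score into a probability between 0 and 1; samples above a threshold (0.5 by default) are assigned to the positive class, and training minimizes the log (cross-entropy) loss rather than the squared error. A small sketch of just the sigmoid:

import numpy as np

def sigmoid(z):
    # maps any real-valued score to a probability in (0, 1)
    return 1.0 / (1.0 + np.exp(-z))

print(sigmoid(0))    # 0.5  -> on the decision boundary
print(sigmoid(3))    # ~0.95 -> confident positive
print(sigmoid(-3))   # ~0.05 -> confident negative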
# coding: gbk
import numpy as np
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler


def logistic():
    """
    Logistic regression
    :return:
    """
    # Build the column names
    column = ['Sample code number', 'Clump Thickness', 'Uniformity of Cell Size', 'Uniformity of Cell Shape',
              'Marginal Adhesion', 'Single Epithelial Cell Size', 'Bare Nuclei', 'Bland Chromatin', 'Normal Nucleoli',
              'Mitoses', 'Class']
    # Read the data
    data = pd.read_csv(
        "http://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/breast-cancer-wisconsin.data",
        names=column)
    print(data)
    # Handle missing values
    data = data.replace(to_replace='?', value=np.nan)
    data = data.dropna()
    # Split the data
    x_train, x_test, y_train, y_test = train_test_split(data[column[1:10]], data[column[10]], test_size=0.25)
    # Standardize the features
    std = StandardScaler()
    x_train = std.fit_transform(x_train)
    x_test = std.transform(x_test)
    # Logistic regression prediction
    lg = LogisticRegression(C=1.0)
    lg.fit(x_train, y_train)  # train: iteratively minimize the log-likelihood loss to optimize the weights W
    print(lg.coef_)  # weight parameters of the logistic regression model
    y_predict = lg.predict(x_test)  # predicted labels
    print("Accuracy:", lg.score(x_test, y_test))
    print("Recall:", classification_report(y_test, y_predict, labels=[2, 4], target_names=["benign", "malignant"]))
    return None


if __name__ == "__main__":
    logistic()
6. Clustering algorithm: k-means
Detailed explanation of k-means
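In brief, k-means works as follows: pick k initial cluster centers, assign every sample to its nearest center, recompute each center as the mean of the samples assigned to it, and repeat until the centers stop moving. A minimal sketch with scikit-learn's KMeans on made-up blob data (n_samples, centers=3 and random_state are illustration values, not tied to any dataset above):

from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs

# toy data: 300 points grouped around 3 centers (illustration only)
X, _ = make_blobs(n_samples=300, centers=3, random_state=0)

km = KMeans(n_clusters=3, random_state=0)
labels = km.fit_predict(X)   # cluster index assigned to each sample
print(km.cluster_centers_)   # coordinates of the learned centers
print(labels[:10])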