import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
# # Quadratic function of one variable
# # f(x) = w1*x**2 + w2*x + b
# X=np.linspace(0,10,50).reshape(-1,1)
# X=np.concatenate([X**2,X],axis=1)
# print(X.shape)
#
# w=np.random.randint(1,10,size=2)
# b=np.random.randint(-5,5,size=1)
# # matrix multiplication
# y=X.dot(w)+b
# plt.plot(X[:,1],y)
# plt.title('w1:%d, w2:%d, b:%d'%(w[0],w[1],b[0]))
#
# # Fit the same data with sklearn's built-in LinearRegression
# lr=LinearRegression()
# lr.fit(X,y)
# print(lr.coef_,lr.intercept_)
# plt.scatter(X[:,1],y,marker='*')
# x=np.linspace(-2,12,100)
# plt.plot(x,lr.coef_[0]*x**2+lr.coef_[1]*x+lr.intercept_,c='green')
# Hand-written linear regression that fits a multi-feature (multivariate) equation
# epoch: how many gradient-descent iterations to run
def gradient_descent(X, y, lr, epoch, w, b):
    # batch size: every sample contributes to each full-gradient step
    batch = len(X)
    for _ in range(epoch):
        dw = [0 for _ in range(len(w))]
        db = 0
        for j in range(batch):
            # predicted value from f(x) = w1*x**2 + w2*x + b
            y_ = 0
            for n in range(len(w)):
                y_ += X[j][n] * w[n]
            y_ += b
            # for the squared-error loss 0.5*(y - y_)**2, dL/dy_ = -(y - y_)
            d_loss = -(y[j] - y_)
            # accumulate the mean gradient over the batch
            for n in range(len(w)):
                dw[n] += X[j][n] * d_loss / float(batch)
            db += d_loss / float(batch)
        # gradient-descent update of the coefficients and the intercept
        for n in range(len(w)):
            w[n] -= dw[n] * lr[n]
        b -= db * lr[0]
    return w, b
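
# A vectorized equivalent of the loop above (a sketch; gradient_descent_vec is
# not used below): with X of shape (m, n_features), the mean gradients are
# dw = -X.T @ (y - y_) / m and db = -mean(y - y_), giving the same updates
# without the inner Python loops.
def gradient_descent_vec(X, y, lr, epoch, w, b):
    m = len(X)
    lr = np.asarray(lr)
    for _ in range(epoch):
        residual = y - (X.dot(w) + b)  # y - y_ for every sample at once
        dw = -X.T.dot(residual) / m    # mean gradient per coefficient
        db = -residual.mean()          # mean gradient for the intercept
        w = w - dw * lr                # per-feature learning rates
        b = b - db * lr[0]
    return w, b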
# per-feature learning rates: the x**2 column has a much larger scale than
# the x column, so it gets a smaller step size
lr = [0.0001, 0.001]
# ground-truth parameters used to generate the data
w_true = np.random.randn(2)
b_true = np.random.randn(1)[0]
X = np.linspace(0, 10, 50).reshape(-1, 1)
X = np.concatenate([X**2, X], axis=1)
y = X.dot(w_true) + b_true
# random starting point for training, kept separate from w_true/b_true so
# gradient descent actually has something to learn
w = np.random.randn(2)
b = np.random.randn(1)[0]
w_, b_ = gradient_descent(X, y, lr, 500, w, b)
print('learned:', w_, b_, 'true:', w_true, b_true)
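
# Optional sanity check: the LinearRegression imported above solves the same
# least-squares problem in closed form, so its coefficients should land close
# to w_ and b_ once gradient descent has converged.
check = LinearRegression()
check.fit(X, y)
print('sklearn coef:', check.coef_, 'intercept:', check.intercept_)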
plt.scatter(X[:, 1], y, marker='*')
x = np.linspace(-2, 12, 100)
# plot the learned curve; note the trained intercept is b_ (the float b
# passed into gradient_descent is not updated in place)
f = lambda x: w_[0] * x**2 + w_[1] * x + b_
plt.plot(x, f(x), c='green')
plt.show()