Algorithm Idea
The code below is written from the following material in Andrew Ng's (吴恩达) machine learning lecture videos.
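For reference, these are the standard univariate linear regression formulas from the course that the code implements, where m is the number of training samples and α is the learning rate:

$$h_\theta(x) = \theta_0 + \theta_1 x$$

$$J(\theta_0, \theta_1) = \frac{1}{2m}\sum_{i=1}^{m}\left(h_\theta(x^{(i)}) - y^{(i)}\right)^2$$

Repeat until convergence, updating both parameters simultaneously:

$$\theta_0 := \theta_0 - \alpha\,\frac{1}{m}\sum_{i=1}^{m}\left(h_\theta(x^{(i)}) - y^{(i)}\right)$$

$$\theta_1 := \theta_1 - \alpha\,\frac{1}{m}\sum_{i=1}^{m}\left(h_\theta(x^{(i)}) - y^{(i)}\right)x^{(i)}$$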


Code
import matplotlib.pyplot as plt
import numpy as np

class Linear_Regression:
    def __init__(self, data, theta0, theta1, learning_rate):
        self.data = data
        self.theta0 = theta0
        self.theta1 = theta1
        self.learning_rate = learning_rate
        self.m = len(data)

    # hypothesis: h(x) = theta0 + theta1 * x
    def h_theta(self, x):
        return self.theta0 + self.theta1 * x

    # cost function: halved mean squared error over the m samples
    def J(self):
        temp = 0
        for i in range(self.m):
            temp += pow(self.h_theta(self.data[i][0]) - self.data[i][1], 2)
        return 1 / (2 * self.m) * temp

    # partial derivative of J with respect to theta0
    def pd_theta0_J(self):
        temp = 0
        for i in range(self.m):
            temp += self.h_theta(self.data[i][0]) - self.data[i][1]
        return 1 / self.m * temp

    # partial derivative of J with respect to theta1
    def pd_theta1_J(self):
        temp = 0
        for i in range(self.m):
            temp += (self.h_theta(self.data[i][0]) - self.data[i][1]) * self.data[i][0]
        return 1 / self.m * temp

    # gradient descent
    def gd(self):
        min_cost = 0.01    # stop once the cost falls below this threshold
        n_round = 1
        max_round = 100    # safety cap on the number of iterations
        while min_cost < self.J() and n_round <= max_round:
            # compute both new values first, then assign: simultaneous update
            temp0 = self.theta0 - self.learning_rate * self.pd_theta0_J()
            temp1 = self.theta1 - self.learning_rate * self.pd_theta1_J()
            self.theta0 = temp0
            self.theta1 = temp1
            print('round', n_round, ':\ttheta0=%.16f' % self.theta0, '\ttheta1=%.16f' % self.theta1)
            n_round += 1
        return self.theta0, self.theta1

if __name__ == '__main__':
    # set parameters
    data = [[1, 2], [2, 5], [4, 8], [5, 9], [8, 15]]
    theta0 = 0
    theta1 = 0
    learning_rate = 0.01

    # scatter plot of the training data
    x = [point[0] for point in data]
    y = [point[1] for point in data]
    plt.scatter(x, y)

    # run gradient descent
    linear_regression = Linear_Regression(data, theta0, theta1, learning_rate)
    theta0, theta1 = linear_regression.gd()

    # plot the fitted line
    x = np.arange(0, 10, 0.01)
    y = theta0 + theta1 * x
    plt.plot(x, y)
    plt.show()
Result Plot
(Figure: scatter plot of the training data with the fitted regression line, as produced by plt.show() above.)
This post walks through a from-scratch implementation of linear regression trained with gradient descent, covering the hypothesis function, the cost function, the partial derivatives, and the iterative update loop, and shows via a complete Python example how to train the model and plot the fitted line.
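As a sanity check on the gradient descent result, the same line can also be fitted in closed form. A minimal sketch using NumPy's np.polyfit; this comparison is not part of the original post:

import numpy as np

# Same toy data set as in the example above.
data = [[1, 2], [2, 5], [4, 8], [5, 9], [8, 15]]
x = np.array([p[0] for p in data], dtype=float)
y = np.array([p[1] for p in data], dtype=float)

# A degree-1 polyfit returns [slope, intercept], i.e. the exact
# least-squares minimizer of the same cost J.
theta1, theta0 = np.polyfit(x, y, 1)
print('closed form: theta0=%.6f  theta1=%.6f' % (theta0, theta1))
# With a suitable learning rate and enough iterations, gd()
# should converge toward these values.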