Give the points as below:
| x | y |
| 1 | 3 |
| 2 | 5 |
| 3 | 7 |
| 4 | 9 |
try to find the optimal function y = w * x + b that fits the points above.
The code as below:
import numpy as np
def compute_mse(w, b, points):
    """Return the mean squared error of the line y = w*x + b over `points`.

    `points` is a sequence of (x, y) pairs (e.g. rows of a 2-column array).
    """
    n = float(len(points))
    squared_error = sum(((w * px + b) - py) ** 2 for px, py in points)
    return squared_error / n
def compute_step_grad(curr_w, curr_b, points, lr):
    """Perform one gradient-descent step on the MSE loss.

    Computes the gradient of the mean squared error with respect to w and b
    at (curr_w, curr_b), then moves one step of size `lr` against it.
    Returns [next_w, next_b].
    """
    n = float(len(points))
    grad_w = 0
    grad_b = 0
    for px, py in points:
        # residual of the current line at this point
        residual = curr_w * px + curr_b - py
        grad_w += 2 * residual * px / n
        grad_b += 2 * residual / n
    return [curr_w - lr * grad_w, curr_b - lr * grad_b]
def gradient_descent(init_w, init_b, points, lr, iters):
    """Run `iters` gradient-descent steps starting from (init_w, init_b).

    Returns [w, b] after the final step.
    """
    w, b = init_w, init_b
    for _ in range(iters):
        w, b = compute_step_grad(w, b, points, lr)
    return [w, b]
def main():
    """Fit y = w*x + b to the points in 'points.csv' via gradient descent.

    Loads a 2-column CSV of (x, y) points, prints the MSE at the initial
    parameters, runs gradient descent, and prints the fitted parameters
    and final MSE.
    """
    points = np.genfromtxt('points.csv', delimiter=',')
    init_w = 0.5
    init_b = 0.1
    lr = 0.01
    iters = 1000
    curr_w, curr_b = gradient_descent(init_w, init_b, points, lr, iters)
    print('--------------------------------------')
    print('initial w = {}, b = {}, mse = {}.'.format(
        init_w, init_b, compute_mse(init_w, init_b, points)))
    print('--------------------------------------')
    # Fixed typo in the report: "mese" -> "mse".
    print('after gradient descent, current w = {}, b = {}, mse = {}.'.format(
        curr_w, curr_b, compute_mse(curr_w, curr_b, points)))


if __name__ == "__main__":
    main()
本文介绍了一种使用梯度下降法来寻找最佳线性回归模型的方法。通过给定的数据点,逐步调整权重和偏置,以最小化损失函数(均方误差)。演示了从初始化参数到实现梯度下降的完整过程。
618

被折叠的 条评论
为什么被折叠?



