# Regularized regression models (linear & logistic)
import numpy as np
def sigmoid(z):
return 1 / (1 + np.exp(-z))
# Regularized linear regression (正则化线性回归)
def linearcostReg(theta, X, y, learningRate):
theta = np.matrix(theta)
X = np.matrix(X)
y = np.matrix(y)
first = np.log(sigmoid(X*theta.T))
second = y
reg = learningRate * np.power(theta[:, 1, theta.shape[0]], 2)
return np.sum(np.power((first - second), 2), reg) / (2 * len(X))
# Regularized logistic regression (正则化逻辑回归)
def costReg(theta, X, y, learningRate):
    """Regularized logistic-regression cost: cross-entropy plus an L2 penalty.

    J(theta) = (1/m) * sum(-y*log(h) - (1-y)*log(1-h))
             + learningRate/(2m) * sum(theta[1:]^2),  h = sigmoid(X*theta.T)

    Parameters
    ----------
    theta : array-like, shape (n,) or (1, n) — model parameters; theta[0] is the bias.
    X : array-like, shape (m, n) — design matrix.
    y : array-like, shape (m, 1) — binary labels in {0, 1}.
    learningRate : float — regularization strength (lambda).

    Returns
    -------
    float — the regularized cost.
    """
    theta = np.matrix(theta)
    X = np.matrix(X)
    y = np.matrix(y)
    # Hoist the hypothesis: computed once instead of twice.
    h = sigmoid(X * theta.T)
    first = np.multiply(-y, np.log(h))
    second = np.multiply((1 - y), np.log(1 - h))
    # Fix: slice theta[:, 1:theta.shape[1]] to skip the bias term — the original
    # 3-component index theta[:, 1, theta.shape[1]] raises IndexError on a 2-D matrix.
    reg = (learningRate / (2 * len(X))) * np.sum(np.power(theta[:, 1:theta.shape[1]], 2))
    return np.sum(first - second) / (len(X)) + reg