# -*- coding: utf-8 -*-
import numpy as np
import mkdata as mk
import matplotlib.pyplot as plt
import random
N = 100                # number of samples to generate
iterNums = 10000       # gradient-descent iterations
# Kernel function: maps the features (x1, x2) to (x1, x2, x1*x2, x1*x1, x2*x2)
def kernel(X, y):
    m, n = X.shape                      # assumes m == 2 input features; y is unused
    X_copy = np.zeros((m + 3, n))       # 5 rows for the expanded features
    X_copy[0] = X[0]
    X_copy[1] = X[1]
    X_copy[2] = X[0] * X[1]             # cross term x1*x2
    X_copy[3] = X[0] * X[0]             # x1 squared
    X_copy[4] = X[1] * X[1]             # x2 squared
    return X_copy
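# Shape check (a sketch with a made-up 2x3 input): with m == 2 rows in,
# the mapping returns 5 rows out, ordered x1, x2, x1*x2, x1^2, x2^2.
#
#   >>> X_demo = np.array([[1., 2., 3.], [4., 5., 6.]])
#   >>> kernel(X_demo, None).shape
#   (5, 3)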
# Stochastic gradient descent: update from one randomly chosen sample per step
def gradientDescent_stochastic(X, y):
    m, n = X.shape
    w = np.zeros(m)
    b = 0.0
    alpha = 0.01                        # learning rate; the original implicit step of 1 can diverge
    for i in range(iterNums):
        j = random.randrange(n)         # pick one sample at random
        err = np.dot(w, X[:, j]) + b - y[0][j]   # residual of sample j, computed once for both updates
        w = w - alpha * err * X[:, j]
        b = b - alpha * err
    return w, b
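# Sanity-check sketch (an addition, not in the original script): both descent
# routines minimize the squared error sum_j (w.x_j + b - y_j)^2, which has a
# closed-form least-squares solution; np.linalg.lstsq on a design matrix with
# a constant column should roughly match a converged descent result.
def leastSquares(X, y):
    m, n = X.shape
    A = np.vstack([X, np.ones(n)]).T    # n x (m+1) design matrix, last column for the bias
    sol = np.linalg.lstsq(A, y[0], rcond=None)[0]
    return sol[:-1], sol[-1]            # (weights, bias)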
# Batch gradient descent: accumulate the gradient over all samples, then step
def gradientDescent_batch(X, y):
    m, n = X.shape
    w = np.zeros(m)
    b = 0.0
    for i in range(iterNums):
        grad_w = np.zeros(m)
        grad_b = 0.0
        for j in range(n):
            err = np.dot(w, X[:, j]) + b - y[0][j]   # residual of sample j
            grad_w += err * X[:, j]
            grad_b += err
        w = w - 0.01 * grad_w           # learning rate 0.01
        b = b - 0.01 * grad_b
    return w, b
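# Visualization sketch (an addition, not in the original flow): train on the
# kernelized features and draw the nonlinear boundary theta.phi(x) + bias = 0
# as a zero-level contour, since it is no longer a straight line.
def plot_kernel_boundary(X, y):
    Xk = kernel(X, y)                   # 5xN expanded features
    theta_k, bias_k = gradientDescent_batch(Xk, y)
    g1, g2 = np.meshgrid(np.arange(-2, 2, 0.05), np.arange(-2, 2, 0.05))
    z = (theta_k[0] * g1 + theta_k[1] * g2 + theta_k[2] * g1 * g2
         + theta_k[3] * g1 ** 2 + theta_k[4] * g2 ** 2 + bias_k)
    plt.scatter(X[0, y[0] == 1], X[1, y[0] == 1], color='red')
    plt.scatter(X[0, y[0] == -1], X[1, y[0] == -1], color='g')
    plt.contour(g1, g2, z, levels=[0])  # the set where theta.phi(x) + bias = 0
    plt.show()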
if __name__ == "__main__":
    X, y, w = mk.mk_data(N)             # 2xN features, 1xN labels in {1, -1}, generating weights
    theta, bias = gradientDescent_batch(X, y)
    #theta, bias = gradientDescent_stochastic(X, y)
    # scatter the two classes
    plt.scatter(X[0, y[0] == 1], X[1, y[0] == 1], color='red')
    plt.scatter(X[0, y[0] == -1], X[1, y[0] == -1], color='g')
    # decision boundary: theta[0]*x1 + theta[1]*x2 + bias = 0
    x = np.arange(-2, 2, 0.1)
    x2 = (-bias - theta[0] * x) / theta[1]
    plt.plot(x, x2)
    plt.show()
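    # Optional: nonlinear boundary via the kernel mapping (sketch above)
    #plot_kernel_boundary(X, y)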