'''
Created on Nov 4, 2010
Chapter 5 source file for Machine Learning in Action
@author: Peter
'''
from numpy import *
from time import sleep
# Load the data set
def loadDataSet(fileName):
    """
    Load a tab-separated data set of two features plus a label per line.

    :param fileName: path to the data file (columns: x1, x2, label)
    :return: (dataMat, labelMat) — list of [x1, x2] feature rows and the
             list of float labels
    """
    dataMat = []
    labelMat = []
    # BUG FIX: the original opened the file without ever closing it;
    # `with` guarantees the handle is released even on a parse error.
    with open(fileName) as fr:
        for line in fr:
            lineArr = line.strip().split('\t')
            dataMat.append([float(lineArr[0]), float(lineArr[1])])
            labelMat.append(float(lineArr[2]))
    return dataMat, labelMat
def selectJrand(i, m):
    """
    Pick a random index j in [0, m) that differs from i.

    :param i: index of the first alpha (must not be picked again)
    :param m: total number of samples
    :return: a random index j != i
    """
    candidate = i
    # keep drawing until we land on an index different from i
    while candidate == i:
        candidate = int(random.uniform(0, m))
    return candidate
def clipAlpha(aj, H, L):
    """
    Clamp aj into the interval [L, H].

    :param aj: the candidate alpha value
    :param H: upper bound
    :param L: lower bound
    :return: aj clipped so that L <= result <= H
    """
    clipped = aj
    # pull down to the ceiling first, then up to the floor (same order
    # as the original, so behavior matches even for odd L/H inputs)
    if clipped > H:
        clipped = H
    if clipped < L:
        clipped = L
    return clipped
# Helper class that stores the shared state used by the SMO routines
class optStruct:
    """Container for everything the SMO routines share: the data, the
    hyper-parameters, the alphas, the intercept b and the error cache."""

    def __init__(self, dataMatIn, classLabels, C, toler):
        """
        :param dataMatIn: training samples, one row per sample
        :param classLabels: column vector of +/-1 labels
        :param C: soft-margin regularization constant
        :param toler: KKT-violation tolerance
        """
        self.X = dataMatIn
        self.labelMat = classLabels
        self.C = C
        self.tol = toler
        self.m = shape(dataMatIn)[0]
        self.b = 0
        # one Lagrange multiplier per sample, all starting at zero
        self.alphas = mat(zeros((self.m, 1)))
        # error cache: column 0 is the valid flag, column 1 the cached E value
        self.eCache = mat(zeros((self.m, 2)))
def calcEk(oS, k):
    """
    Compute the prediction error E_k = f(x_k) - y_k for sample k
    using the linear kernel.

    :param oS: optStruct holding X, labelMat, alphas and b
    :param k: sample index
    :return: the error E_k as a float
    """
    # column of kernel values <x_i, x_k> for every training sample i
    kernelCol = oS.X * oS.X[k, :].T
    # f(x_k) = sum_i alpha_i * y_i * <x_i, x_k> + b
    fXk = float(multiply(oS.alphas, oS.labelMat).T * kernelCol) + oS.b
    return fXk - float(oS.labelMat[k])
def selectJ(i, oS, Ei):  # second-choice heuristic
    """
    Choose the inner-loop index j for a given outer-loop index i.

    Prefers the j that maximizes |E_i - E_j| among the cached valid errors;
    on the first pass (no usable cache entries) it falls back to a random j.

    :param i: index chosen by the outer loop
    :param oS: shared optStruct with the error cache
    :param Ei: error for index i, cached as a side effect
    :return: (j, Ej) — the chosen inner index and its error
    """
    oS.eCache[i] = [1, Ei]  # mark E_i as valid in the cache
    # indices of all cache rows whose valid flag is set
    validIdx = nonzero(oS.eCache[:, 0].A)[0]
    if len(validIdx) <= 1:
        # no other valid entries yet: pick any j != i at random
        j = selectJrand(i, oS.m)
        return j, calcEk(oS, j)
    bestK, bestDelta, bestEk = -1, 0, 0
    # scan the valid entries for the largest step size |Ei - Ek|
    for k in validIdx:
        if k == i:
            continue  # skip i itself
        Ek = calcEk(oS, k)
        delta = abs(Ei - Ek)
        if delta > bestDelta:
            bestK, bestDelta, bestEk = k, delta, Ek
    return bestK, bestEk
def updateEk(oS, k):
    """
    Refresh the cached error for index k after an alpha changed,
    setting its valid flag to 1.

    :param oS: shared optStruct with the error cache
    :param k: index whose cache entry is refreshed
    """
    oS.eCache[k] = [1, calcEk(oS, k)]
def innerL(i, oS):
    """
    Given the first index i chosen by the outer loop, pick a second alpha j
    and try to optimize the pair (alpha_i, alpha_j) jointly.

    :param i: index of the first alpha
    :param oS: optStruct holding data, alphas, b and the error cache
    :return: 1 if a pair of alphas was updated, 0 otherwise
    """
    Ei = calcEk(oS, i)
    # Only optimize if alpha_i violates the KKT conditions by more than tol.
    if (oS.labelMat[i] * Ei < -oS.tol and oS.alphas[i] < oS.C) or (oS.labelMat[i] * Ei > oS.tol and oS.alphas[i] > 0):
        # Pick j with the max-|Ei - Ej| heuristic (not purely at random).
        j, Ej = selectJ(i, oS, Ei)  # this has been changed from selectJrand
        alphaIold = oS.alphas[i].copy()
        alphaJold = oS.alphas[j].copy()
        # Compute the feasible box bounds L and H for the new alpha_j.
        if oS.labelMat[i] != oS.labelMat[j]:
            L = max(0, oS.alphas[j] - oS.alphas[i])
            H = min(oS.C, oS.C + oS.alphas[j] - oS.alphas[i])
        else:
            L = max(0, oS.alphas[j] + oS.alphas[i] - oS.C)
            H = min(oS.C, oS.alphas[j] + oS.alphas[i])
        # If the bounds coincide, alpha_j cannot move: not a usable pair.
        if L == H:
            print("L==H")
            return 0
        # eta = 2*K_ij - K_ii - K_jj (second derivative along the constraint line).
        eta = 2.0 * oS.X[i, :] * oS.X[j, :].T - oS.X[i, :] * oS.X[i, :].T - oS.X[j, :] * oS.X[j, :].T
        """以下三行代码存疑,在实际的推导过程中,并没有对eta进行过约束,去掉之后也可以产生合理的结果"""
        # NOTE(review): the original author questioned this guard; eta >= 0 is
        # the degenerate non-negative-curvature case that is skipped as rare.
        if eta >= 0:
            print("eta>=0")
            return 0
        # Unconstrained optimum for alpha_j, then clip it into [L, H].
        oS.alphas[j] -= oS.labelMat[j] * (Ei - Ej) / eta
        oS.alphas[j] = clipAlpha(oS.alphas[j], H, L)
        updateEk(oS, j)  # added this for the Ecache
        """以下代码也存疑,为什么不需要将变化之前的j变回去,小变化也是变化啊"""
        # If alpha_j barely moved, restore it and report no progress.
        if abs(oS.alphas[j] - alphaJold) < 0.00001:
            oS.alphas[j] = alphaJold
            print("j not moving enough")
            return 0
        # Move alpha_i by the same amount as alpha_j, in the opposite direction.
        oS.alphas[i] += oS.labelMat[j] * oS.labelMat[i] * (alphaJold - oS.alphas[j])  # update i by the same amount as j
        updateEk(oS, i)  # added this for the Ecache #the update is in the oppostie direction
        # Two candidate intercepts: b1 keeps sample i on the margin, b2 keeps j.
        # The one whose alpha lies strictly inside (0, C) is exact; otherwise average.
        b1 = oS.b - Ei - oS.labelMat[i] * (oS.alphas[i] - alphaIold) * oS.X[i, :] * oS.X[i, :].T - oS.labelMat[j] * (
            oS.alphas[j] - alphaJold) * oS.X[i, :] * oS.X[j, :].T
        b2 = oS.b - Ej - oS.labelMat[i] * (oS.alphas[i] - alphaIold) * oS.X[i, :] * oS.X[j, :].T - oS.labelMat[j] * (
            oS.alphas[j] - alphaJold) * oS.X[j, :] * oS.X[j, :].T
        if (0 < oS.alphas[i]) and (oS.C > oS.alphas[i]):
            oS.b = b1
        elif (0 < oS.alphas[j]) and (oS.C > oS.alphas[j]):
            oS.b = b2
        else:
            oS.b = (b1 + b2) / 2.0
        # One pair of alphas was successfully updated.
        return 1
    else:
        # alpha_i already satisfies KKT within tolerance: nothing changed.
        return 0
def smoP(dataMatIn, classLabels, C, toler, maxIter):  # full Platt SMO
    """
    Full Platt SMO outer loop.

    Alternates between full passes over every sample and passes over only
    the non-bound samples (0 < alpha < C), stopping when a full pass changes
    no alpha pair or when maxIter outer iterations have been used.

    :param dataMatIn: training samples, one row per sample
    :param classLabels: +/-1 labels
    :param C: soft-margin regularization constant
    :param toler: KKT-violation tolerance
    :param maxIter: maximum number of outer iterations
    :return: (b, alphas) — the intercept and the Lagrange multipliers
    """
    oS = optStruct(mat(dataMatIn), mat(classLabels).transpose(), C, toler)
    iterNum = 0  # renamed from `iter` to avoid shadowing the builtin
    entireSet = True
    alphaPairsChanged = 0
    # BUG FIX: the original condition `not (alphaPairsChanged > 0 and entireSet)`
    # could never be false before maxIter (entireSet is only True at the test
    # when the previous pass changed nothing), so convergence was never
    # detected and every run burned all maxIter iterations.  Platt's SMO stops
    # when a full pass over the data changes no alpha pair:
    while iterNum < maxIter and (alphaPairsChanged > 0 or entireSet):
        # reset the per-pass change counter
        alphaPairsChanged = 0
        if entireSet:
            # full pass: try every sample index as the first alpha
            for i in range(oS.m):
                alphaPairsChanged += innerL(i, oS)
                print("fullSet, iter: %d i:%d, pairs changed %d" % (iterNum, i, alphaPairsChanged))
            iterNum += 1
        else:
            # restricted pass: only the non-bound alphas (0 < alpha < C)
            nonBoundIs = nonzero((oS.alphas.A > 0) * (oS.alphas.A < C))[0]
            for i in nonBoundIs:
                alphaPairsChanged += innerL(i, oS)
                print("non-bound, iter: %d i:%d, pairs changed %d" % (iterNum, i, alphaPairsChanged))
            iterNum += 1
        # After a full pass, switch to non-bound passes; if a non-bound pass
        # changed nothing, go back to a full pass to verify convergence.
        if entireSet:
            entireSet = False  # toggle entire set loop
        elif alphaPairsChanged == 0:
            entireSet = True
        print("iteration number: %d" % iterNum)
    return oS.b, oS.alphas
def calcWs(alphas, dataArr, classLabels):
    """
    Recover the primal weight vector w = sum_i alpha_i * y_i * x_i
    from the learned Lagrange multipliers.

    :param alphas: column vector of Lagrange multipliers
    :param dataArr: training samples, one row per sample
    :param classLabels: +/-1 labels
    :return: weight vector w with shape (n_features, 1)
    """
    X = mat(dataArr)
    y = mat(classLabels).transpose()
    nSamples, nFeatures = shape(X)
    w = zeros((nFeatures, 1))
    # accumulate the contribution alpha_i * y_i * x_i of every sample
    for idx in range(nSamples):
        w += multiply(alphas[idx] * y[idx], X[idx, :].T)
    return w
if __name__ == '__main__':
    # Load the demo training set (tab-separated columns: x1, x2, label).
    dataArr, labelArr = loadDataSet('testSet.txt')
    # Train with C=0.6, tolerance=0.001 and at most 40 outer iterations.
    b, alphas = smoP(dataArr, labelArr, 0.6, 0.001, 40)
    # print(b)
    # print(alphas)
    # print(alphas[alphas > 0])
    # Recover and display the primal weight vector from the learned alphas.
    print(calcWs(alphas, dataArr, labelArr))
# --- Trailing non-code text (scraped blog footer), commented out so the module parses ---
# 机器学习复习:SMO算法源码解读  (Machine learning review: reading the SMO source code)
# 最新推荐文章于 2025-03-12 14:46:51 发布  (latest recommended article published 2025-03-12 14:46:51)