Basic Definitions
Given a training set $D$, the most basic idea in classification learning is to find a separating hyperplane in the sample space that divides the samples of the different classes. The more robust this hyperplane, the better the resulting classifier generalizes to unseen examples. A separating hyperplane can be described by the linear equation

$$w^T x + b = 0$$

where $w = (w_1; w_2; \ldots; w_d)$ is the normal vector, which determines the orientation of the hyperplane, and $b$ is the bias term, which determines the distance between the hyperplane and the origin. Since the hyperplane is fully determined by $w$ and $b$, we denote it by $(w, b)$. The distance from any point $x$ in the sample space to the hyperplane $(w, b)$ is

$$r = \frac{|w^T x + b|}{\|w\|}$$
Suppose the hyperplane classifies the training samples correctly: for $(x_i, y_i) \in D$, $w^T x_i + b > 0$ when $y_i = +1$ and $w^T x_i + b < 0$ when $y_i = -1$. By rescaling $w$ and $b$ we can require

$$\begin{cases} w^T x_i + b \ge +1, & \text{if } y_i = +1 \\ w^T x_i + b \le -1, & \text{if } y_i = -1 \end{cases}$$

The samples closest to the hyperplane are exactly those for which equality holds; they are called "support vectors". The sum of the distances from two support vectors of opposite classes to the hyperplane is

$$\gamma = \frac{2}{\|w\|}$$
which is called the margin. The goal is to find the separating hyperplane with the maximum margin, which is why the SVM is also called a maximum-margin classifier. Since maximizing $2/\|w\|$ is equivalent to minimizing $\|w\|^2$, the objective can be written as

$$\min_{w,b} \frac{1}{2}\|w\|^2$$

$$\text{s.t.}\quad y_i(w^T x_i + b) \ge 1, \quad i = 1, 2, \ldots, m.$$

This is the basic (primal) form of the support vector machine.
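As a quick sanity check of the primal form, here is a minimal numpy sketch on made-up toy data: it verifies the constraints $y_i(w^T x_i + b) \ge 1$ for a hand-picked candidate $(w, b)$ and evaluates the margin $2/\|w\|$ (the data and the hyperplane are illustrative, not the result of training):

```python
import numpy as np

# Toy 2-D data: two points per class, linearly separable (illustrative values).
X = np.array([[2.0, 2.0], [3.0, 3.0], [-2.0, -2.0], [-3.0, -3.0]])
y = np.array([1.0, 1.0, -1.0, -1.0])

# Hand-picked candidate hyperplane; for this data w = (1/4, 1/4), b = 0
# makes the closest points satisfy y_i (w^T x_i + b) = 1 exactly.
w = np.array([0.25, 0.25])
b = 0.0

print(y * (X @ w + b))        # functional margins: [1.  1.5 1.  1.5], all >= 1
print(2 / np.linalg.norm(w))  # margin 2/||w|| ~= 5.657
```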
The Dual Problem
The dual form of the basic SVM problem can be obtained with the method of Lagrange multipliers. We first derive the dual for a general problem with multiple constraints: consider $m$ equality constraints and $n$ inequality constraints,
$$\min_{x} f(x)$$

$$\text{s.t.}\quad h_i(x) = 0 \quad (i = 1, \ldots, m)$$

$$\qquad\; g_j(x) \le 0 \quad (j = 1, \ldots, n)$$
Introducing the Lagrange multipliers $\lambda = (\lambda_1, \lambda_2, \ldots, \lambda_m)^T$ and $\mu = (\mu_1, \mu_2, \ldots, \mu_n)^T$, the corresponding Lagrangian is

$$L(x, \lambda, \mu) = f(x) + \sum_{i=1}^{m} \lambda_i h_i(x) + \sum_{j=1}^{n} \mu_j g_j(x)$$

The KKT conditions introduced by the inequality constraints are

$$\begin{cases} g_j(x) \le 0 \\ \mu_j \ge 0 \\ \mu_j\, g_j(x) = 0 \end{cases}$$
For this primal problem, define the dual function

$$\tau(\lambda, \mu) = \inf_{x}\left( f(x) + \sum_{i=1}^{m} \lambda_i h_i(x) + \sum_{j=1}^{n} \mu_j g_j(x) \right)$$

For any feasible point $\tilde{x}$ we have $h_i(\tilde{x}) = 0$ and, since $\mu_j \ge 0$, also $\mu_j g_j(\tilde{x}) \le 0$, so

$$\sum_{i=1}^{m} \lambda_i h_i(\tilde{x}) + \sum_{j=1}^{n} \mu_j g_j(\tilde{x}) \le 0$$

and therefore $L(\tilde{x}, \lambda, \mu) \le f(\tilde{x})$. Since the infimum over all $x$ can only be smaller,

$$\tau(\lambda, \mu) \le f(\tilde{x})$$

Thus the dual function gives a lower bound on the optimal value of the primal problem. A natural question is: what is the best lower bound obtainable from the dual function? This leads to the optimization problem

$$\max_{\lambda, \mu} \tau(\lambda, \mu) \quad \text{s.t.}\quad \mu \succeq 0$$

which is the dual problem of the primal problem.
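To make the lower-bound property concrete, here is a small numeric illustration (a made-up one-dimensional problem, not the SVM itself): minimize $f(x) = x^2$ subject to $1 - x \le 0$, whose primal optimum is $f(1) = 1$. The dual function works out to $\tau(\mu) = \mu - \mu^2/4$, which never exceeds 1 and attains it at $\mu = 2$:

```python
import numpy as np

# Toy problem: min f(x) = x^2  s.t.  g(x) = 1 - x <= 0.  Primal optimum: x* = 1, f(x*) = 1.
xs = np.linspace(-5, 5, 10001)  # grid for the inner minimization over x

def tau(mu):
    # Dual function tau(mu) = inf_x [ x^2 + mu * (1 - x) ], approximated on the grid
    return np.min(xs**2 + mu * (1 - xs))

for mu in [0.0, 1.0, 2.0, 3.0]:
    print(mu, tau(mu))  # prints mu - mu^2/4: 0.0, 0.75, 1.0, 0.75 -- every value <= 1
```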
Now return to the original SVM problem. Its Lagrangian can be written as

$$L(w, b, \alpha) = \frac{1}{2}\|w\|^2 + \sum_{i=1}^{m} \alpha_i\left(1 - y_i(w^T x_i + b)\right)$$

Setting the partial derivatives with respect to $w$ and $b$ to zero gives

$$w = \sum_{i=1}^{m} \alpha_i y_i x_i$$

$$\sum_{i=1}^{m} \alpha_i y_i = 0$$
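Substituting the first condition back into $L(w, b, \alpha)$ and using the second to cancel the $b$ term eliminates both $w$ and $b$:

$$L(w, b, \alpha) = \sum_{i=1}^{m} \alpha_i - \frac{1}{2}\sum_{i=1}^{m}\sum_{j=1}^{m} \alpha_i \alpha_j y_i y_j x_i^T x_j$$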
Maximizing this over $\alpha$ gives the dual problem:

$$\max_{\alpha} \sum_{i=1}^{m} \alpha_i - \frac{1}{2}\sum_{i=1}^{m}\sum_{j=1}^{m} \alpha_i \alpha_j y_i y_j x_i^T x_j$$

$$\text{s.t.}\quad \sum_{i=1}^{m} \alpha_i y_i = 0, \quad \alpha_i \ge 0 \quad (i = 1, 2, \ldots, m)$$

Once $\alpha$ is solved, recovering $w$ and $b$ yields the model

$$f(x) = w^T x + b = \sum_{i=1}^{m} \alpha_i y_i x_i^T x + b$$
So how should $\alpha$ be computed?
The SMO Algorithm
SMO's basic idea is to fix all parameters other than some $\alpha_i$ and optimize over that $\alpha_i$ alone. However, because of the constraint $\sum_{i=1}^{m} \alpha_i y_i = 0$, once every variable other than $\alpha_i$ is fixed, $\alpha_i$ is already determined by the others. SMO therefore selects two variables $\alpha_i$ and $\alpha_j$ at a time, fixes the rest, and repeats the following steps until convergence:
- Select a pair of variables $\alpha_i$ and $\alpha_j$ to update.
- With all parameters other than $\alpha_i$ and $\alpha_j$ fixed, solve the dual for the updated $\alpha_i$ and $\alpha_j$.
For the first variable, SMO selects the one that violates the KKT conditions most severely. The second variable is then chosen so that the gap between the two corresponding samples is as large as possible; in the implementation below this is approximated by maximizing $|E_i - E_j|$, the difference of the two samples' prediction errors. For $b$, each support vector $s$ satisfies $y_s f(x_s) = 1$, which can be solved for $b$; the final value is the average of these solutions over all support vectors, as sketched below.
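A minimal numpy sketch of that averaging step in the linear case (assuming `alphas`, `X`, `y` come from an already-solved dual; the function name and the `eps` threshold are illustrative):

```python
import numpy as np

def solve_b(alphas, X, y, eps=1e-8):
    # Each support vector s (alpha_s > 0) satisfies y_s (w^T x_s + b) = 1,
    # i.e. b = y_s - w^T x_s for labels in {-1, +1}; average over all of them.
    sv = alphas > eps                  # support-vector mask
    w = (alphas * y) @ X               # w = sum_i alpha_i y_i x_i
    return np.mean(y[sv] - X[sv] @ w)
```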
Kernel Functions
For some problems, no hyperplane in the original sample space can correctly separate the two classes. In such cases the samples can be mapped from the original space into a higher-dimensional feature space in which they become linearly separable. Let $\phi(x)$ denote the feature vector of $x$ after the mapping; the model corresponding to a separating hyperplane in the feature space can then be written as

$$f(x) = w^T \phi(x) + b$$

The rest of the derivation parallels the linear case, except that the inner products now live in the feature space, whose dimension may be very high, making them expensive to compute directly. The kernel trick avoids this: a kernel function $\kappa(x_i, x_j) = \phi(x_i)^T \phi(x_j)$ evaluates the feature-space inner product directly from the original inputs.
There are some rules of thumb for choosing the mapping: a linear kernel is the usual choice for text data, and when the situation is unclear, a Gaussian kernel is a reasonable first try. The two are defined as follows, with a small numpy sketch after the list:

- Linear kernel: $\kappa(x_i, x_j) = x_i^T x_j$
- Gaussian (RBF) kernel: $\kappa(x_i, x_j) = e^{-\frac{\|x_i - x_j\|^2}{2\sigma^2}}$
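A vectorized numpy sketch of the two kernels in Gram-matrix form (the loop-based `kernelTrans` below computes the same quantities one column at a time):

```python
import numpy as np

def linear_gram(X):
    # K[i, j] = x_i^T x_j
    return X @ X.T

def rbf_gram(X, sigma):
    # K[i, j] = exp(-||x_i - x_j||^2 / (2 sigma^2))
    sq = np.sum(X**2, axis=1)
    d2 = sq[:, None] + sq[None, :] - 2 * (X @ X.T)  # pairwise squared distances
    return np.exp(-d2 / (2 * sigma**2))
```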
Implementation

The code below follows Machine Learning in Action: a simplified SMO (`smoSimple`), the full Platt SMO with an error cache (`smoP`), kernel support, and two test harnesses.
```python
from numpy import *
from time import sleep

def loadDataSet(filename):
    # Each line: feature1 \t feature2 \t label
    dataMat = []
    labelMat = []
    with open(filename, 'r') as fr:
        for line in fr.readlines():
            lineArr = line.strip().split('\t')
            dataMat.append([float(lineArr[0]), float(lineArr[1])])
            labelMat.append(float(lineArr[2]))
    return dataMat, labelMat

def selectJrand(i, m):
    # Pick a random index j in [0, m) with j != i
    j = i
    while j == i:
        j = int(random.uniform(0, m))
    return j

def clipAlpha(aj, H, L):
    # Clip aj into the box [L, H]
    if aj > H:
        aj = H
    if L > aj:
        aj = L
    return aj
def smoSimple(dataMatIn, classLabels, C, toler, maxIter):
    dataMatrix = mat(dataMatIn)
    labelMat = mat(classLabels).transpose()
    b = 0; m, n = shape(dataMatrix)
    alphas = mat(zeros((m, 1)))
    iter = 0
    while iter < maxIter:
        alphaPairsChanged = 0
        for i in range(m):
            # f(x_i) = sum_k alpha_k y_k x_k^T x_i + b, and its error E_i
            fXi = float(multiply(alphas, labelMat).T * (dataMatrix * dataMatrix[i, :].T)) + b
            Ei = fXi - float(labelMat[i])
            # Only optimize alphas that violate the KKT conditions (within toler)
            if ((labelMat[i] * Ei < -toler) and (alphas[i] < C)) or \
               ((labelMat[i] * Ei > toler) and (alphas[i] > 0)):
                j = selectJrand(i, m)
                fXj = float(multiply(alphas, labelMat).T * (dataMatrix * dataMatrix[j, :].T)) + b
                Ej = fXj - float(labelMat[j])
                alphaIold = alphas[i].copy()
                alphaJold = alphas[j].copy()
                # Bounds [L, H] keep 0 <= alpha_j <= C and sum_i alpha_i y_i = 0
                if labelMat[i] != labelMat[j]:
                    L = max(0, alphas[j] - alphas[i])
                    H = min(C, C + alphas[j] - alphas[i])
                else:
                    L = max(0, alphas[j] + alphas[i] - C)
                    H = min(C, alphas[j] + alphas[i])
                if L == H:
                    print('L==H')
                    continue
                # eta = 2 x_i^T x_j - x_i^T x_i - x_j^T x_j (negated second derivative)
                eta = 2.0 * dataMatrix[i, :] * dataMatrix[j, :].T \
                    - dataMatrix[i, :] * dataMatrix[i, :].T \
                    - dataMatrix[j, :] * dataMatrix[j, :].T
                if eta >= 0:
                    print('eta>=0')
                    continue
                alphas[j] -= labelMat[j] * (Ei - Ej) / eta
                alphas[j] = clipAlpha(alphas[j], H, L)
                if abs(alphas[j] - alphaJold) < 0.00001:
                    print('j not moving enough')
                    continue
                # Update alpha_i by the same amount in the opposite direction
                alphas[i] += labelMat[j] * labelMat[i] * (alphaJold - alphas[j])
                b1 = b - Ei - labelMat[i] * (alphas[i] - alphaIold) * dataMatrix[i, :] * dataMatrix[i, :].T \
                            - labelMat[j] * (alphas[j] - alphaJold) * dataMatrix[i, :] * dataMatrix[j, :].T
                b2 = b - Ej - labelMat[i] * (alphas[i] - alphaIold) * dataMatrix[i, :] * dataMatrix[j, :].T \
                            - labelMat[j] * (alphas[j] - alphaJold) * dataMatrix[j, :] * dataMatrix[j, :].T
                if (0 < alphas[i]) and (C > alphas[i]): b = b1
                elif (0 < alphas[j]) and (C > alphas[j]): b = b2
                else: b = (b1 + b2) / 2.0
                alphaPairsChanged += 1
                print("iter: %d i:%d, pairs changed %d" % (iter, i, alphaPairsChanged))
        # Only count an iteration when a full pass makes no updates
        if alphaPairsChanged == 0: iter += 1
        else: iter = 0
        print("iteration number: %d" % iter)
    return b, alphas
def kernelTrans(X, A, kTup):
    # Calculate the kernel column K[j] = kappa(X[j], A) for every row of X
    m, n = shape(X)
    K = mat(zeros((m, 1)))
    if kTup[0] == 'lin':
        K = X * A.T  # linear kernel
    elif kTup[0] == 'rbf':
        for j in range(m):
            deltaRow = X[j, :] - A
            K[j] = deltaRow * deltaRow.T
        # NOTE: this computes exp(-||x-a||^2 / sigma^2); the sigma here absorbs
        # the factor of 2 in the Gaussian kernel formula given above.
        K = exp(K / (-1 * kTup[1] ** 2))  # divide in NumPy is element-wise, not matrix-wise as in Matlab
    else:
        raise NameError('Houston We Have a Problem -- That Kernel is not recognized')
    return K
class optStruct:
    def __init__(self, dataMatIn, classLabels, C, toler, kTup):
        # Initialize the structure with the parameters
        self.X = dataMatIn
        self.labelMat = classLabels
        self.C = C
        self.tol = toler
        self.m = shape(dataMatIn)[0]
        self.alphas = mat(zeros((self.m, 1)))
        self.b = 0
        self.eCache = mat(zeros((self.m, 2)))  # first column is valid flag
        self.K = mat(zeros((self.m, self.m)))  # precomputed kernel matrix
        for i in range(self.m):
            self.K[:, i] = kernelTrans(self.X, self.X[i, :], kTup)
def calcEk(oS, k):
    # Prediction error E_k = f(x_k) - y_k, using the precomputed kernel matrix
    # oS.K (the linear-only form oS.X * oS.X[k,:].T would be wrong here once a
    # non-linear kernel is in play)
    fXk = float(multiply(oS.alphas, oS.labelMat).T * oS.K[:, k] + oS.b)
    Ek = fXk - float(oS.labelMat[k])
    return Ek
def selectJ(i, oS, Ei):
    # Second-choice heuristic: pick the j maximizing |Ei - Ej|, and return Ej
    maxK = -1; maxDeltaE = 0; Ej = 0
    oS.eCache[i] = [1, Ei]  # mark Ei as valid in the cache
    validEcacheList = nonzero(oS.eCache[:, 0].A)[0]
    if (len(validEcacheList)) > 1:
        for k in validEcacheList:  # loop through valid cache values, find max delta E
            if k == i: continue  # don't calc for i, waste of time
            Ek = calcEk(oS, k)
            deltaE = abs(Ei - Ek)
            if deltaE > maxDeltaE:
                maxK = k; maxDeltaE = deltaE; Ej = Ek
        return maxK, Ej
    else:  # first time around: no valid eCache values yet, choose randomly
        j = selectJrand(i, oS.m)
        Ej = calcEk(oS, j)
        return j, Ej
def updateEk(oS, k):
    # After any alpha has changed, update its error in the cache
    Ek = calcEk(oS, k)
    oS.eCache[k] = [1, Ek]
def innerL(i, oS):
    Ei = calcEk(oS, i)
    if ((oS.labelMat[i] * Ei < -oS.tol) and (oS.alphas[i] < oS.C)) or \
       ((oS.labelMat[i] * Ei > oS.tol) and (oS.alphas[i] > 0)):
        j, Ej = selectJ(i, oS, Ei)  # heuristic choice instead of selectJrand
        alphaIold = oS.alphas[i].copy(); alphaJold = oS.alphas[j].copy()
        if oS.labelMat[i] != oS.labelMat[j]:
            L = max(0, oS.alphas[j] - oS.alphas[i])
            H = min(oS.C, oS.C + oS.alphas[j] - oS.alphas[i])
        else:
            L = max(0, oS.alphas[j] + oS.alphas[i] - oS.C)
            H = min(oS.C, oS.alphas[j] + oS.alphas[i])
        if L == H: print("L==H"); return 0
        eta = 2.0 * oS.K[i, j] - oS.K[i, i] - oS.K[j, j]  # changed for kernel
        if eta >= 0: print("eta>=0"); return 0
        oS.alphas[j] -= oS.labelMat[j] * (Ei - Ej) / eta
        oS.alphas[j] = clipAlpha(oS.alphas[j], H, L)
        updateEk(oS, j)  # update the error cache
        if abs(oS.alphas[j] - alphaJold) < 0.00001:
            print("j not moving enough"); return 0
        oS.alphas[i] += oS.labelMat[j] * oS.labelMat[i] * (alphaJold - oS.alphas[j])  # update i in the opposite direction
        updateEk(oS, i)  # update the error cache
        b1 = oS.b - Ei - oS.labelMat[i] * (oS.alphas[i] - alphaIold) * oS.K[i, i] \
                       - oS.labelMat[j] * (oS.alphas[j] - alphaJold) * oS.K[i, j]
        b2 = oS.b - Ej - oS.labelMat[i] * (oS.alphas[i] - alphaIold) * oS.K[i, j] \
                       - oS.labelMat[j] * (oS.alphas[j] - alphaJold) * oS.K[j, j]
        if (0 < oS.alphas[i]) and (oS.C > oS.alphas[i]): oS.b = b1
        elif (0 < oS.alphas[j]) and (oS.C > oS.alphas[j]): oS.b = b2
        else: oS.b = (b1 + b2) / 2.0
        return 1
    else:
        return 0
def smoP(dataMatIn, classLabels, C, toler, maxIter, kTup=('lin', 0)):
    # Full Platt SMO: alternate between passes over the whole set and passes
    # over the non-bound alphas (0 < alpha < C)
    oS = optStruct(mat(dataMatIn), mat(classLabels).transpose(), C, toler, kTup)
    iter = 0
    entireSet = True; alphaPairsChanged = 0
    while (iter < maxIter) and ((alphaPairsChanged > 0) or entireSet):
        alphaPairsChanged = 0
        if entireSet:  # go over all samples
            for i in range(oS.m):
                alphaPairsChanged += innerL(i, oS)
                print("fullSet, iter: %d i:%d, pairs changed %d" % (iter, i, alphaPairsChanged))
            iter += 1
        else:  # go over non-bound (non-railed) alphas
            nonBoundIs = nonzero((oS.alphas.A > 0) * (oS.alphas.A < C))[0]
            for i in nonBoundIs:
                alphaPairsChanged += innerL(i, oS)
                print("non-bound, iter: %d i:%d, pairs changed %d" % (iter, i, alphaPairsChanged))
            iter += 1
        if entireSet: entireSet = False  # toggle entire set loop
        elif alphaPairsChanged == 0: entireSet = True
        print("iteration number: %d" % iter)
    return oS.b, oS.alphas
def calcWs(alphas, dataArr, classLabels):
    # Recover w = sum_i alpha_i y_i x_i from the solved alphas
    X = mat(dataArr); labelMat = mat(classLabels).transpose()
    m, n = shape(X)
    w = zeros((n, 1))
    for i in range(m):
        w += multiply(alphas[i] * labelMat[i], X[i, :].T)
    return w
def testRbf(k1=1.3):
    dataArr, labelArr = loadDataSet('testSetRBF.txt')
    b, alphas = smoP(dataArr, labelArr, 200, 0.0001, 10000, ('rbf', k1))  # C=200 important
    datMat = mat(dataArr); labelMat = mat(labelArr).transpose()
    svInd = nonzero(alphas.A > 0)[0]
    sVs = datMat[svInd]  # get matrix of only support vectors
    labelSV = labelMat[svInd]
    print("there are %d Support Vectors" % shape(sVs)[0])
    m, n = shape(datMat)
    errorCount = 0
    for i in range(m):
        # Predict with support vectors only: f(x) = sum_s alpha_s y_s kappa(x_s, x) + b
        kernelEval = kernelTrans(sVs, datMat[i, :], ('rbf', k1))
        predict = kernelEval.T * multiply(labelSV, alphas[svInd]) + b
        if sign(predict) != sign(labelArr[i]): errorCount += 1
    print("the training error rate is: %f" % (float(errorCount) / m))
    dataArr, labelArr = loadDataSet('testSetRBF2.txt')
    errorCount = 0
    datMat = mat(dataArr); labelMat = mat(labelArr).transpose()
    m, n = shape(datMat)
    for i in range(m):
        kernelEval = kernelTrans(sVs, datMat[i, :], ('rbf', k1))
        predict = kernelEval.T * multiply(labelSV, alphas[svInd]) + b
        if sign(predict) != sign(labelArr[i]): errorCount += 1
    print("the test error rate is: %f" % (float(errorCount) / m))
def img2vector(filename):
    # Read a 32x32 text image of 0/1 characters into a 1x1024 row vector
    returnVect = zeros((1, 1024))
    with open(filename) as fr:
        for i in range(32):
            lineStr = fr.readline()
            for j in range(32):
                returnVect[0, 32 * i + j] = int(lineStr[j])
    return returnVect
def loadImages(dirName):
    from os import listdir
    hwLabels = []
    trainingFileList = listdir(dirName)  # load the training set
    m = len(trainingFileList)
    trainingMat = zeros((m, 1024))
    for i in range(m):
        fileNameStr = trainingFileList[i]
        fileStr = fileNameStr.split('.')[0]  # take off .txt
        classNumStr = int(fileStr.split('_')[0])
        # Binary problem: digit 9 is the negative class, everything else positive
        if classNumStr == 9: hwLabels.append(-1)
        else: hwLabels.append(1)
        trainingMat[i, :] = img2vector('%s/%s' % (dirName, fileNameStr))
    return trainingMat, hwLabels
def testDigits(kTup=('rbf', 10)):
    dataArr, labelArr = loadImages('trainingDigits')
    b, alphas = smoP(dataArr, labelArr, 200, 0.0001, 10000, kTup)
    datMat = mat(dataArr); labelMat = mat(labelArr).transpose()
    svInd = nonzero(alphas.A > 0)[0]
    sVs = datMat[svInd]
    labelSV = labelMat[svInd]
    print("there are %d Support Vectors" % shape(sVs)[0])
    m, n = shape(datMat)
    errorCount = 0
    for i in range(m):
        kernelEval = kernelTrans(sVs, datMat[i, :], kTup)
        predict = kernelEval.T * multiply(labelSV, alphas[svInd]) + b
        if sign(predict) != sign(labelArr[i]): errorCount += 1
    print("the training error rate is: %f" % (float(errorCount) / m))
    dataArr, labelArr = loadImages('testDigits')
    errorCount = 0
    datMat = mat(dataArr); labelMat = mat(labelArr).transpose()
    m, n = shape(datMat)
    for i in range(m):
        kernelEval = kernelTrans(sVs, datMat[i, :], kTup)
        predict = kernelEval.T * multiply(labelSV, alphas[svInd]) + b
        if sign(predict) != sign(labelArr[i]): errorCount += 1
    print("the test error rate is: %f" % (float(errorCount) / m))
```