From 170aa190b88df025140c084a8fd164fb74a1914d Mon Sep 17 00:00:00 2001
From: geekidentity
Date: Fri, 31 Mar 2017 00:20:09 +0800
Subject: [PATCH] svmMLiA.py comments
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 docs/6.支持向量机.md          |  2 +
 src/python/06.SVM/svmMLiA.py | 76 ++++++++++++++++++++++++++++++------
 2 files changed, 65 insertions(+), 13 deletions(-)

diff --git a/docs/6.支持向量机.md b/docs/6.支持向量机.md
index 021fa531..951c3e46 100644
--- a/docs/6.支持向量机.md
+++ b/docs/6.支持向量机.md
@@ -8,6 +8,8 @@
 * We want to find the points closest to the separating hyperplane and make sure they are as far from the separating surface as possible.
 * Support vectors are simply the points closest to the separating hyperplane.
 
+* The goal of an SVM is to find the optimal separating hyperplane that maximizes the margin of the training data.
+
 * Characteristics of support vector machines
 
 * Pros: low generalization error, computationally inexpensive, and the results are easy to interpret.
diff --git a/src/python/06.SVM/svmMLiA.py b/src/python/06.SVM/svmMLiA.py
index 24aa144a..b25d5b5a 100644
--- a/src/python/06.SVM/svmMLiA.py
+++ b/src/python/06.SVM/svmMLiA.py
@@ -5,8 +5,13 @@ Chapter 5 source file for Machine Learing in Action
 @author: Peter/geekidentity
 """
 from numpy import *
+import pylab
 from time import sleep
 
+def main():
+    dataArr, labelArr = loadDataSet('testSet.txt')
+    smoSimple(dataArr, labelArr, 0.6, 0.001, 40)
+
 def loadDataSet(fileName):
     """
     Parse the file line by line to get each line's class label and the full data matrix
@@ -82,29 +87,35 @@ def smoSimple(dataMatIn, classLabels, C, toler, maxIter):
     alphas = mat(zeros((m,1)))
     iter = 0  # number of passes through the data without any alpha changing
     while (iter < maxIter):
+        w = calcWs(alphas, dataMatIn, classLabels)
+        print("w:", w)
         alphaPairsChanged = 0  # records whether any alpha has been optimized; reset to 0 on each pass, then sweep the whole set in order
         for i in range(m):
             fXi = float(multiply(alphas,labelMat).T*(dataMatrix*dataMatrix[i,:].T)) + b  # our predicted class
-            Ei = fXi - float(labelMat[i])  # if checks if an example violates KKT conditions; error
-            if ((labelMat[i]*Ei < -toler) and (alphas[i] < C)) or ((labelMat[i]*Ei > toler) and (alphas[i] > 0)):
+            Ei = fXi - float(labelMat[i])  # error Ei: compare the prediction for this instance with its true label to check whether it violates the KKT conditions. Ref: http://blog.csdn.net/puqutogether/article/details/44587653
+            if ((labelMat[i]*Ei < -toler) and (alphas[i] < C)) or ((labelMat[i]*Ei > toler) and (alphas[i] > 0)):  # test both positive and negative margins, and also check that alpha is not already pinned at 0 or C
                 j = selectJrand(i,m)  # optimize when the error is large
                 fXj = float(multiply(alphas,labelMat).T*(dataMatrix*dataMatrix[j,:].T)) + b
                 Ej = fXj - float(labelMat[j])
-                alphaIold = alphas[i].copy(); alphaJold = alphas[j].copy()
-                if (labelMat[i] != labelMat[j]):
+                alphaIold = alphas[i].copy()
+                alphaJold = alphas[j].copy()
+                if (labelMat[i] != labelMat[j]):  # clamp alpha between 0 and C
                     L = max(0, alphas[j] - alphas[i])
                     H = min(C, C + alphas[j] - alphas[i])
                 else:
                     L = max(0, alphas[j] + alphas[i] - C)
                     H = min(C, alphas[j] + alphas[i])
                 if L==H: print("L==H"); continue
-                eta = 2.0 * dataMatrix[i,:]*dataMatrix[j,:].T - dataMatrix[i,:]*dataMatrix[i,:].T - dataMatrix[j,:]*dataMatrix[j,:].T
-                if eta >= 0: print("eta>=0"); continue
+                eta = 2.0 * dataMatrix[i,:]*dataMatrix[j,:].T - dataMatrix[i,:]*dataMatrix[i,:].T - dataMatrix[j,:]*dataMatrix[j,:].T  # optimal amount to change alphas[j]
+                if eta >= 0: print("eta>=0"); continue  # if eta >= 0, computing the new alphas[j] is more involved, so skip this pair
                 alphas[j] -= labelMat[j]*(Ei - Ej)/eta
                 alphas[j] = clipAlpha(alphas[j],H,L)
+                # check whether alpha[j] has changed by only a tiny amount; if so, move on to the next i
                 if (abs(alphas[j] - alphaJold) < 0.00001): print("j not moving enough"); continue
+                # update alpha[i] by the same amount as alpha[j], but in the opposite direction
                 alphas[i] += labelMat[j]*labelMat[i]*(alphaJold - alphas[j])  # update i by the same amount as j; the update is in the opposite direction
+                # after optimizing alpha[i] and alpha[j], set the constant term b for this pair
                 b1 = b - Ei- labelMat[i]*(alphas[i]-alphaIold)*dataMatrix[i,:]*dataMatrix[i,:].T - labelMat[j]*(alphas[j]-alphaJold)*dataMatrix[i,:]*dataMatrix[j,:].T
                 b2 = b - Ej- labelMat[i]*(alphas[i]-alphaIold)*dataMatrix[i,:]*dataMatrix[j,:].T - labelMat[j]*(alphas[j]-alphaJold)*dataMatrix[j,:]*dataMatrix[j,:].T
                 if (0 < alphas[i]) and (C > alphas[i]): b = b1
@@ -112,8 +123,9 @@
                 else: b = (b1 + b2)/2.0
                 alphaPairsChanged += 1
                 print("iter: %d i:%d, pairs changed %d" % (iter,i,alphaPairsChanged))
+        # outside the for loop: check whether any alpha pair was updated; if so, reset iter to 0 and keep looping
         if (alphaPairsChanged == 0): iter += 1
-        else: iter = 0
+        else:iter = 0
         print("iteration number: %d" % iter)
     return b,alphas
@@ -135,6 +147,9 @@ def kernelTrans(X, A, kTup): # calc the kernel or transform data to a higher di
 
 
 class optStruct:
+    """
+    Data structure that holds all of the important values.
+    """
     def __init__(self, dataMatIn, classLabels, C, toler, kTup):  # Initialize the structure with the parameters
         self.X = dataMatIn
         self.labelMat = classLabels
@@ -143,26 +158,48 @@ class optStruct:
         self.m = shape(dataMatIn)[0]
         self.alphas = mat(zeros((self.m, 1)))
         self.b = 0
-        self.eCache = mat(zeros((self.m, 2)))  # first column is valid flag
+        self.eCache = mat(zeros((self.m, 2)))  # the first column is a flag marking whether the eCache entry is valid; the second column holds the actual E value
         self.K = mat(zeros((self.m, self.m)))
         for i in range(self.m):
             self.K[:, i] = kernelTrans(self.X, self.X[i, :], kTup)
 
 
 def calcEk(oS, k):
+    """
+    Compute the E value for index k and return it.
+    This calculation shows up frequently in the full Platt SMO, so it is pulled out into its own function.
+    Args:
+        oS: optStruct instance
+        k: index of the example whose error is computed
+
+    Returns:
+        Ek, the error for example k
+    """
     fXk = float(multiply(oS.alphas, oS.labelMat).T * oS.K[:, k] + oS.b)
     Ek = fXk - float(oS.labelMat[k])
     return Ek
 
 
 def selectJ(i, oS, Ei):  # this is the second choice -heurstic, and calcs Ej
+    """
+    Select the second (inner-loop) alpha.
+    The goal is to pick the second alpha so that each optimization step takes the maximum step size.
+    The error used here depends on the first alpha's error Ei and its index i.
+    Args:
+        i: index of the first alpha
+        oS: optStruct instance
+        Ei: error for the first alpha
+
+    Returns:
+        the chosen index (maxK or j) and its error Ej
+    """
     maxK = -1
     maxDeltaE = 0
     Ej = 0
-    oS.eCache[i] = [1, Ei]  # set valid #choose the alpha that gives the maximum delta E
-    validEcacheList = nonzero(oS.eCache[:, 0].A)[0]
+    oS.eCache[i] = [1, Ei]  # first mark the input Ei as valid in the cache; "valid" here means it has already been computed
+    validEcacheList = nonzero(oS.eCache[:, 0].A)[0]  # indices of the alphas with non-zero (cached) E values
     if (len(validEcacheList)) > 1:
-        for k in validEcacheList:  # loop through valid Ecache values and find the one that maximizes delta E
+        for k in validEcacheList:  # loop over all the cached values and pick the one that gives the largest change (delta E)
             if k == i: continue  # don't calc for i, waste of time
             Ek = calcEk(oS, k)
             deltaE = abs(Ei - Ek)
@@ -171,13 +208,23 @@
                 maxDeltaE = deltaE; Ej = Ek
         return maxK, Ej
-    else:  # in this case (first time around) we don't have any valid eCache values
+    else:  # on the first pass there are no valid eCache values yet, so pick an alpha at random
         j = selectJrand(i, oS.m)
         Ej = calcEk(oS, j)
     return j, Ej
 
 
 def updateEk(oS, k):  # after any alpha has changed update the new value in the cache
+    """
+    Compute the error for index k and store it in the cache.
+    This cached value is used after an alpha has been optimized.
+    Args:
+        oS: optStruct instance
+        k: index of the alpha whose error is cached
+
+    Returns:
+
+    """
     Ek = calcEk(oS, k)
     oS.eCache[k] = [1, Ek]
 
@@ -455,4 +502,7 @@ def smoPK(dataMatIn, classLabels, C, toler, maxIter): # full Platt SMO
         elif (alphaPairsChanged == 0): entireSet = True
         print("iteration number: %d" % iter)
-    return oS.b, oS.alphas
\ No newline at end of file
+    return oS.b, oS.alphas
+
+if __name__ == "__main__":
+    main()
\ No newline at end of file
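
A minimal usage sketch for reviewers, showing one way the new main() entry point and the values returned by smoSimple might be exercised from a separate script. It assumes testSet.txt sits next to svmMLiA.py (as main() above assumes) and that calcWs (the helper this patch calls inside the training loop) returns the weight vector as a column vector, as the book's version does; the file name usage_sketch.py and the script itself are illustrative, not part of this patch.

    # usage_sketch.py: hypothetical companion script, not included in this patch
    from numpy import mat

    from svmMLiA import loadDataSet, smoSimple, calcWs

    dataArr, labelArr = loadDataSet('testSet.txt')
    b, alphas = smoSimple(dataArr, labelArr, 0.6, 0.001, 40)  # C=0.6, toler=0.001, maxIter=40

    # w is a weighted sum of the training points whose alpha is non-zero (the support
    # vectors); calcWs is the same helper the patch calls inside the while loop.
    w = calcWs(alphas, dataArr, labelArr)

    # classify the first training point with sign(x * w + b)
    x = mat(dataArr)[0]
    print('prediction:', 1 if float(x * mat(w) + b) > 0 else -1, 'true label:', labelArr[0])

After training, only the alphas of the support vectors end up non-zero, so w can be recovered from a handful of points; the print("w:", w) added inside the while loop makes that visible pass by pass.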