Update the 4.NaiveBayes 朴素贝叶斯.md file and bayes.py
@@ -11,7 +11,7 @@ from numpy import *
p(xy) = p(x|y) p(y) = p(y|x) p(x)
p(x|y) = p(y|x) p(x) / p(y)
"""

# Project case 1: blocking abusive comments on a community message board


def loadDataSet():
    """
@@ -21,7 +21,7 @@ def loadDataSet():
    postingList = [['my', 'dog', 'has', 'flea', 'problems', 'help', 'please'],  # [0,0,1,1,1......]
                   ['maybe', 'not', 'take', 'him', 'to', 'dog', 'park', 'stupid'],
                   ['my', 'dalmation', 'is', 'so', 'cute', 'I', 'love', 'him'],
                   ['stop', 'posting', 'stupid', 'worthless', 'garbage'],
                   ['mr', 'licks', 'ate', 'my', 'steak', 'how', 'to', 'stop', 'him'],
                   ['quit', 'buying', 'worthless', 'dog', 'food', 'stupid']]
    classVec = [0, 1, 0, 1, 0, 1]  # 1 is abusive, 0 not
@@ -154,7 +154,7 @@ def classifyNB(vec2Classify, p0Vec, p1Vec, pClass1):
    # Formula: log(P(F1|C)) + log(P(F2|C)) + ... + log(P(Fn|C)) + log(P(C))
    # NumPy multiplies the two arrays element-wise: the first elements are multiplied
    # together, then the second elements, and so on.
    # My reading: vec2Classify * p1Vec ties each word to its corresponding probability.
    # It can be read either as 1. the probability that the document is the "good" class
    # given the vocabulary words it contains, or 2. the probability, over the whole
    # space, that the document both contains these vocabulary words and is the "good" class.
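    # Worked example with made-up numbers: if vec2Classify = array([1, 0, 1]) and
    # p1Vec = array([log(0.5), log(0.2), log(0.3)]), then vec2Classify * p1Vec keeps
    # only the log-probabilities of words that actually appear, and sum(...) yields
    # log(0.5) + log(0.3) = log(0.5 * 0.3). Summing logs instead of multiplying raw
    # probabilities avoids floating-point underflow for large vocabularies.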
    p1 = sum(vec2Classify * p1Vec) + log(pClass1)
    p0 = sum(vec2Classify * p0Vec) + log(1.0 - pClass1)
    if p1 > p0:
@@ -195,5 +195,137 @@ def testingNB():
    print testEntry, 'classified as: ', classifyNB(thisDoc, p0V, p1V, pAb)

# ------------------------------------------------------------------------------------------
# Project case 2: filtering spam email with naive Bayes


# Tokenize text
def textParse(bigString):
    import re
    # Split the string with a regular expression; the delimiters are runs of anything
    # other than word characters (letters, digits, underscore). Note \W+ rather than
    # \W*, which would also split on the empty string between characters.
    listOfTokens = re.split(r'\W+', bigString)
    # Keep only tokens longer than 2 characters, lower-cased
    return [tok.lower() for tok in listOfTokens if len(tok) > 2]
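
# Usage sketch (made-up input), assuming the \W+ tokenizer above:
#   textParse('This book is the best book on M.L. I have ever read.')
#   -> ['this', 'book', 'the', 'best', 'book', 'have', 'ever', 'read']
# (one- and two-character tokens such as 'is', 'on', 'M', 'L', 'I' are dropped)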

def spamTest():
    docList = []
    classList = []
    fullText = []
    for i in range(1, 26):
        # Class 1: spam
        wordList = textParse(open('input/4.NaiveBayes/email/spam/%d.txt' % i).read())
        docList.append(wordList)
        fullText.extend(wordList)  # collect every token (mirrors the ham branch below)
        classList.append(1)
        # Class 0: ham
        wordList = textParse(open('input/4.NaiveBayes/email/ham/%d.txt' % i).read())
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(0)
    # Build the vocabulary list
    vocabList = createVocabList(docList)
    trainingSet = range(50)  # a list in Python 2: indices of all 50 documents
    testSet = []
    # Randomly hold out 10 emails for testing
    for i in range(10):
        # random.uniform(x, y) returns a random float in the range [x, y)
        randIndex = int(random.uniform(0, len(trainingSet)))
        testSet.append(trainingSet[randIndex])
        del(trainingSet[randIndex])
    trainMat = []
    trainClasses = []
    for docIndex in trainingSet:
        trainMat.append(setOfWords2Vec(vocabList, docList[docIndex]))
        trainClasses.append(classList[docIndex])
    p0V, p1V, pSpam = trainNB0(array(trainMat), array(trainClasses))
    errorCount = 0
    for docIndex in testSet:
        wordVector = setOfWords2Vec(vocabList, docList[docIndex])
        if classifyNB(array(wordVector), p0V, p1V, pSpam) != classList[docIndex]:
            errorCount += 1
    print 'the errorCount is: ', errorCount
    print 'the testSet length is :', len(testSet)
    print 'the error rate is :', float(errorCount)/len(testSet)
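
# Usage note (sketch): spamTest() resamples its 10-email test set on every call, so
# the printed error rate varies between runs; averaging over repeated calls, e.g.
#   for _ in range(10): spamTest()
# gives a steadier estimate of the classifier's error.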

def testParseTest():
    print textParse(open('input/4.NaiveBayes/email/ham/1.txt').read())

# -----------------------------------------------------------------------------------
# Project case 3: inferring regional attitudes from personal ads with naive Bayes


# Parse a document into a word-count (bag-of-words) vector
def bagOfWords2VecMN(vocabList, inputSet):
    returnVec = [0] * len(vocabList)  # a vector of zeros, one slot per vocabulary word
    for word in inputSet:
        if word in vocabList:
            returnVec[vocabList.index(word)] += 1  # count occurrences, not just presence
    return returnVec
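
# Illustrative check (made-up data): with vocabList = ['stop', 'dog', 'stupid'],
# bagOfWords2VecMN(vocabList, ['stupid', 'stupid', 'dog']) returns [0, 1, 2]:
# unlike the set-of-words model, repeated words raise their count.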

# Parse a file (same tokenizer as textParse above)
def textParse(bigString):
    import re
    listOfTokens = re.split(r'\W+', bigString)
    return [tok.lower() for tok in listOfTokens if len(tok) > 2]

# RSS feed classifier and frequent-word removal
def calcMostFreq(vocabList, fullText):
    import operator
    freqDict = {}
    for token in vocabList:  # walk every word in the vocabulary
        freqDict[token] = fullText.count(token)  # count its occurrences in the text
    # Sort the dictionary entries by count, from most to least frequent
    # (dict.iteritems() is the Python 2 spelling; Python 3 would use items())
    sortedFreq = sorted(freqDict.iteritems(), key=operator.itemgetter(1), reverse=True)
    return sortedFreq[:30]  # the 30 most frequent words as (word, count) pairs
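
# Illustrative check (made-up data): with vocabList = ['dog', 'stop'] and
# fullText = ['dog', 'stop', 'dog'], calcMostFreq returns [('dog', 2), ('stop', 1)].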

def localWords(feed1, feed0):
    import feedparser
    docList = []
    classList = []
    fullText = []
    minLen = min(len(feed1['entries']), len(feed0['entries']))
    for i in range(minLen):
        wordList = textParse(feed1['entries'][i]['summary'])  # one RSS entry at a time
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(1)
        wordList = textParse(feed0['entries'][i]['summary'])
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(0)
    vocabList = createVocabList(docList)
    top30Words = calcMostFreq(vocabList, fullText)
    for pairW in top30Words:
        if pairW[0] in vocabList:
            vocabList.remove(pairW[0])  # drop the most frequent words
    trainingSet = range(2 * minLen)
    testSet = []
    for i in range(20):
        randIndex = int(random.uniform(0, len(trainingSet)))
        testSet.append(trainingSet[randIndex])
        del(trainingSet[randIndex])
    trainMat = []
    trainClasses = []
    for docIndex in trainingSet:
        trainMat.append(bagOfWords2VecMN(vocabList, docList[docIndex]))
        trainClasses.append(classList[docIndex])
    p0V, p1V, pSpam = trainNB0(array(trainMat), array(trainClasses))
    errorCount = 0
    for docIndex in testSet:
        wordVector = bagOfWords2VecMN(vocabList, docList[docIndex])
        if classifyNB(array(wordVector), p0V, p1V, pSpam) != classList[docIndex]:
            errorCount += 1
    print 'the error rate is:', float(errorCount)/len(testSet)
    return vocabList, p0V, p1V
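
# Usage sketch: localWords expects two parsed RSS feeds. The URLs below are the
# Craigslist feeds used in Machine Learning in Action and may no longer resolve;
# treat them as placeholders.
# import feedparser
# ny = feedparser.parse('http://newyork.craigslist.org/stp/index.rss')
# sf = feedparser.parse('http://sfbay.craigslist.org/stp/index.rss')
# vocabList, pSF, pNY = localWords(ny, sf)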

# Print the most characteristic words for each region
def getTopWords(ny, sf):
    import operator
    vocabList, p0V, p1V = localWords(ny, sf)
    topNY = []
    topSF = []
    for i in range(len(p0V)):
        # Keep words whose log-probability clears a fixed threshold
        if p0V[i] > -6.0:
            topSF.append((vocabList[i], p0V[i]))
        if p1V[i] > -6.0:
            topNY.append((vocabList[i], p1V[i]))
    sortedSF = sorted(topSF, key=lambda pair: pair[1], reverse=True)
    print "SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**"
    for item in sortedSF:
        print item[0]
    sortedNY = sorted(topNY, key=lambda pair: pair[1], reverse=True)
    print "NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**"
    for item in sortedNY:
        print item[0]

if __name__ == "__main__":
    testingNB()
    spamTest()
    # laTest()