Naive Bayes Implementation in Python


Probability theory is the foundation of many machine learning algorithms. The naive Bayes classifier is called naive because the whole formalization makes only the most primitive, simple assumption: a problem has many features, and we simply assume the features are independent of one another given the class, i.e. p(w1, w2, ..., wn | c) = p(w1|c) p(w2|c) ... p(wn|c). This assumption is called conditional independence. In real problems the features are usually not fully independent; when that matters, another technique called a Bayesian network is needed. Later on we apply the naive Bayes method to the spam-filtering problem.

Classification with Bayesian decision theory:

Pros: remains effective even with relatively little data, and can handle multi-class problems.

Cons: sensitive to how the input data is prepared; as I understand it, this means you first need a properly labeled sample set for each class.

Data type: nominal values (nominal values provide only enough information to distinguish one object from another, i.e. they support only equality comparisons such as == or !=).

The theoretical basis is the Bayes formula we all learned in probability and statistics, which will not be restated here; we classify by computing and comparing conditional probabilities.
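To make the decision rule concrete before the real code, here is a minimal toy sketch (all numbers are made up purely for illustration): for each class we multiply the class prior by the per-word conditional probabilities of the words that appear, then pick the class with the larger score. Since p(w) is the same for every class, it can be dropped from the comparison.

# Toy illustration of the naive Bayes decision rule (hypothetical numbers).
p_w_given_spam = [0.8, 0.6]   # p(word_i | spam) for a two-word vocabulary
p_w_given_ham  = [0.1, 0.3]   # p(word_i | ham)
p_spam, p_ham  = 0.5, 0.5     # class priors

doc = [1, 1]                  # both vocabulary words are present
score_spam, score_ham = p_spam, p_ham
for present, ps, ph in zip(doc, p_w_given_spam, p_w_given_ham):
    if present:               # naive assumption: multiply independent per-word terms
        score_spam *= ps
        score_ham *= ph
print("spam" if score_spam > score_ham else "ham")   # prints "spam" (0.24 > 0.015)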

Text classification in Python:

def loadDataSet():
    postingList = [['my', 'dog', 'has', 'flea', 'problems', 'help', 'please'],
                   ['maybe', 'not', 'take', 'him', 'to', 'dog', 'park', 'stupid'],
                   ['my', 'dalmation', 'is', 'so', 'cute', 'I', 'love', 'him'],
                   ['stop', 'posting', 'stupid', 'worthless', 'garbage'],
                   ['mr', 'licks', 'ate', 'my', 'steak', 'how', 'to', 'stop', 'him'],
                   ['quit', 'buying', 'worthless', 'dog', 'food', 'stupid']]
    classVec = [0, 1, 0, 1, 0, 1]    # 1 is abusive, 0 not
    return postingList, classVec

def createVocabList(dataSet):
    vocabSet = set([])  # create empty set
    for document in dataSet:
        vocabSet = vocabSet | set(document)  # union of the two sets
    return list(vocabSet)

def setOfWords2Vec(vocabList, inputSet):
    returnVec = [0] * len(vocabList)
    for word in inputSet:
        if word in vocabList:
            returnVec[vocabList.index(word)] = 1
        else:
            print("the word: %s is not in my Vocabulary!" % word)
    return returnVec


The first function, loadDataSet(), creates the sample data; each sample is labeled, 1 for abusive posts and 0 for normal posts.

The next function builds a list of the unique words that appear across all documents.

The third function converts a document (a set of words) into a vector over the vocabulary: a position is 1 if the corresponding word appears in the document and 0 if it does not.
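A quick interactive sanity check of these three functions (the vocabulary order varies between runs because it is built from a set):

listOPosts, listClasses = loadDataSet()
myVocabList = createVocabList(listOPosts)
print(myVocabList)                                  # all unique words, order varies
print(setOfWords2Vec(myVocabList, listOPosts[0]))   # 0/1 vector: 1 where a word of post 0 appears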

from numpy import *

def trainNB0(trainMatrix, trainCategory):
    numTrainDocs = len(trainMatrix)
    numWords = len(trainMatrix[0])
    pAbusive = sum(trainCategory) / float(numTrainDocs)
    p0Num = ones(numWords); p1Num = ones(numWords)   # Laplace smoothing: start counts at 1
    p0Denom = 2.0; p1Denom = 2.0                     # and denominators at 2
    for i in range(numTrainDocs):
        if trainCategory[i] == 1:
            p1Num += trainMatrix[i]
            p1Denom += sum(trainMatrix[i])
        else:
            p0Num += trainMatrix[i]
            p0Denom += sum(trainMatrix[i])
    p1Vect = log(p1Num / p1Denom)    # log to avoid floating-point underflow
    p0Vect = log(p0Num / p0Denom)
    return p0Vect, p1Vect, pAbusive


Two refinements in this function deserve attention. First, the counters are initialized to ones (and the denominators to 2) so that a word never seen with a class does not get probability 0, which would wipe out the entire product. Second, multiplying many small probabilities quickly underflows toward zero, so we take logarithms; since log is monotonic, comparing the log-probabilities gives the same answer as comparing the probabilities themselves.
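A small demonstration of the underflow problem that the log transform solves (the numbers are illustrative):

from math import log

probs = [0.01] * 200                 # 200 hypothetical per-word conditional probabilities
product = 1.0
for p in probs:
    product *= p
print(product)                       # prints 0.0 -- the product has underflowed
print(sum(log(p) for p in probs))    # about -921.03 -- still perfectly usable for comparisons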

The test functions:

def classifyNB(vec2Classify, p0Vec, p1Vec, pClass1):
    p1 = sum(vec2Classify * p1Vec) + log(pClass1)    # element-wise mult; sum of logs = log of product
    p0 = sum(vec2Classify * p0Vec) + log(1.0 - pClass1)
    if p1 > p0:
        return 1
    else:
        return 0

def testingNB():
    listOPosts, listClasses = loadDataSet()
    myVocabList = createVocabList(listOPosts)
    trainMat = []
    for postinDoc in listOPosts:
        trainMat.append(setOfWords2Vec(myVocabList, postinDoc))
    p0V, p1V, pAb = trainNB0(array(trainMat), array(listClasses))
    testEntry = ['love', 'my', 'dalmation']
    thisDoc = array(setOfWords2Vec(myVocabList, testEntry))
    print(testEntry, 'classified as: ', classifyNB(thisDoc, p0V, p1V, pAb))
    testEntry = ['stupid', 'garbage']
    thisDoc = array(setOfWords2Vec(myVocabList, testEntry))
    print(testEntry, 'classified as: ', classifyNB(thisDoc, p0V, p1V, pAb))
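Running testingNB() should print something like the following (vocabulary order varies between runs, but both labels should come out as shown):

>>> testingNB()
['love', 'my', 'dalmation'] classified as:  0
['stupid', 'garbage'] classified as:  1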


Application: filtering spam email with naive Bayes, using cross-validation.

Data preparation: a spam folder holds the labeled spam emails, and a ham folder holds the normal emails.
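The on-disk layout the code below assumes (file names 1.txt through 25.txt in each folder, matching the loop bounds):

email/
    spam/    # 1.txt ... 25.txt, labeled spam
    ham/     # 1.txt ... 25.txt, normal email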

from numpy import *    # needed for array() and random.uniform()

def bagOfWords2VecMN(vocabList, inputSet):
    # bag-of-words variant of setOfWords2Vec: count occurrences, not just presence
    returnVec = [0] * len(vocabList)
    for word in inputSet:
        if word in vocabList:
            returnVec[vocabList.index(word)] += 1
    return returnVec

def textParse(bigString):    # input is big string, output is word list
    import re
    listOfTokens = re.split(r'\W+', bigString)    # \W+ (not \W*): avoids empty/one-char tokens
    return [tok.lower() for tok in listOfTokens if len(tok) > 2]

def spamTest():
    docList = []; classList = []; fullText = []
    for i in range(1, 26):
        wordList = textParse(open('email/spam/%d.txt' % i, errors='ignore').read())
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(1)
        wordList = textParse(open('email/ham/%d.txt' % i, errors='ignore').read())
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(0)
    vocabList = createVocabList(docList)           # create vocabulary
    trainingSet = list(range(50)); testSet = []    # hold out a random test set
    for i in range(10):
        randIndex = int(random.uniform(0, len(trainingSet)))
        testSet.append(trainingSet[randIndex])
        del(trainingSet[randIndex])
    trainMat = []; trainClasses = []
    for docIndex in trainingSet:                   # train the classifier (get probs) with trainNB0
        trainMat.append(bagOfWords2VecMN(vocabList, docList[docIndex]))
        trainClasses.append(classList[docIndex])
    p0V, p1V, pSpam = trainNB0(array(trainMat), array(trainClasses))
    errorCount = 0
    for docIndex in testSet:                       # classify the held-out items
        wordVector = bagOfWords2VecMN(vocabList, docList[docIndex])
        if classifyNB(array(wordVector), p0V, p1V, pSpam) != classList[docIndex]:
            errorCount += 1
            print("classification error", docList[docIndex])
    print('the error rate is: ', float(errorCount) / len(testSet))
    #return vocabList, fullText

textParse takes a long string and parses it into a list of lowercase tokens, dropping anything shorter than three characters.
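For example (an arbitrary test sentence; note that one- and two-character tokens are discarded and everything is lowercased):

>>> textParse('This book is the best book on Python or M.L. I have ever laid eyes upon.')
['this', 'book', 'the', 'best', 'book', 'python', 'have', 'ever', 'laid', 'eyes', 'upon']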

spamTest builds its vectors with bagOfWords2VecMN, a bag-of-words variant of setOfWords2Vec that counts each word's occurrences instead of only recording presence. It then randomly holds out 10 of the 50 emails as a test set and trains on the remaining 40; this random hold-out is a simple form of cross-validation.
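Because the split is random, the reported error rate fluctuates from run to run. A simple way to get a more stable estimate (a sketch, assuming spamTest() is modified to return float(errorCount)/len(testSet) instead of only printing it) is to average over several runs:

numRuns = 10
totalError = 0.0
for _ in range(numRuns):
    totalError += spamTest()    # assumes the hypothetical return-value variant of spamTest
print('average error rate over %d runs: %f' % (numRuns, totalError / numRuns))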

All of the code, collected in a single bayes.py file:

from numpy import *

def loadDataSet():
    postingList = [['my', 'dog', 'has', 'flea', 'problems', 'help', 'please'],
                   ['maybe', 'not', 'take', 'him', 'to', 'dog', 'park', 'stupid'],
                   ['my', 'dalmation', 'is', 'so', 'cute', 'I', 'love', 'him'],
                   ['stop', 'posting', 'stupid', 'worthless', 'garbage'],
                   ['mr', 'licks', 'ate', 'my', 'steak', 'how', 'to', 'stop', 'him'],
                   ['quit', 'buying', 'worthless', 'dog', 'food', 'stupid']]
    classVec = [0, 1, 0, 1, 0, 1]    # 1 is abusive, 0 not
    return postingList, classVec

def createVocabList(dataSet):
    vocabSet = set([])  # create empty set
    for document in dataSet:
        vocabSet = vocabSet | set(document)  # union of the two sets
    return list(vocabSet)

def setOfWords2Vec(vocabList, inputSet):
    returnVec = [0] * len(vocabList)
    for word in inputSet:
        if word in vocabList:
            returnVec[vocabList.index(word)] = 1
        else:
            print("the word: %s is not in my Vocabulary!" % word)
    return returnVec

def trainNB0(trainMatrix, trainCategory):
    numTrainDocs = len(trainMatrix)
    numWords = len(trainMatrix[0])
    pAbusive = sum(trainCategory) / float(numTrainDocs)
    p0Num = ones(numWords); p1Num = ones(numWords)   # Laplace smoothing: start counts at 1
    p0Denom = 2.0; p1Denom = 2.0
    for i in range(numTrainDocs):
        if trainCategory[i] == 1:
            p1Num += trainMatrix[i]
            p1Denom += sum(trainMatrix[i])
        else:
            p0Num += trainMatrix[i]
            p0Denom += sum(trainMatrix[i])
    p1Vect = log(p1Num / p1Denom)    # log to avoid floating-point underflow
    p0Vect = log(p0Num / p0Denom)
    return p0Vect, p1Vect, pAbusive

def classifyNB(vec2Classify, p0Vec, p1Vec, pClass1):
    p1 = sum(vec2Classify * p1Vec) + log(pClass1)    # element-wise mult
    p0 = sum(vec2Classify * p0Vec) + log(1.0 - pClass1)
    if p1 > p0:
        return 1
    else:
        return 0

def testingNB():
    listOPosts, listClasses = loadDataSet()
    myVocabList = createVocabList(listOPosts)
    trainMat = []
    for postinDoc in listOPosts:
        trainMat.append(setOfWords2Vec(myVocabList, postinDoc))
    p0V, p1V, pAb = trainNB0(array(trainMat), array(listClasses))
    testEntry = ['love', 'my', 'dalmation']
    thisDoc = array(setOfWords2Vec(myVocabList, testEntry))
    print(testEntry, 'classified as: ', classifyNB(thisDoc, p0V, p1V, pAb))
    testEntry = ['stupid', 'garbage']
    thisDoc = array(setOfWords2Vec(myVocabList, testEntry))
    print(testEntry, 'classified as: ', classifyNB(thisDoc, p0V, p1V, pAb))

def bagOfWords2VecMN(vocabList, inputSet):
    returnVec = [0] * len(vocabList)
    for word in inputSet:
        if word in vocabList:
            returnVec[vocabList.index(word)] += 1    # count occurrences, not just presence
    return returnVec

def textParse(bigString):    # input is big string, output is word list
    import re
    listOfTokens = re.split(r'\W+', bigString)
    return [tok.lower() for tok in listOfTokens if len(tok) > 2]

def spamTest():
    docList = []; classList = []; fullText = []
    for i in range(1, 26):
        wordList = textParse(open('email/spam/%d.txt' % i, errors='ignore').read())
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(1)
        wordList = textParse(open('email/ham/%d.txt' % i, errors='ignore').read())
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(0)
    vocabList = createVocabList(docList)           # create vocabulary
    trainingSet = list(range(50)); testSet = []    # hold out a random test set
    for i in range(10):
        randIndex = int(random.uniform(0, len(trainingSet)))
        testSet.append(trainingSet[randIndex])
        del(trainingSet[randIndex])
    trainMat = []; trainClasses = []
    for docIndex in trainingSet:                   # train the classifier (get probs) with trainNB0
        trainMat.append(bagOfWords2VecMN(vocabList, docList[docIndex]))
        trainClasses.append(classList[docIndex])
    p0V, p1V, pSpam = trainNB0(array(trainMat), array(trainClasses))
    errorCount = 0
    for docIndex in testSet:                       # classify the held-out items
        wordVector = bagOfWords2VecMN(vocabList, docList[docIndex])
        if classifyNB(array(wordVector), p0V, p1V, pSpam) != classList[docIndex]:
            errorCount += 1
            print("classification error", docList[docIndex])
    print('the error rate is: ', float(errorCount) / len(testSet))
    #return vocabList, fullText

if __name__ == "__main__":
    listOPosts, listClasses = loadDataSet()
    myVocabList = createVocabList(listOPosts)
    print(myVocabList)
    trainMat = []
    for postinDoc in listOPosts:
        trainMat.append(setOfWords2Vec(myVocabList, postinDoc))
    p0V, p1V, pAb = trainNB0(trainMat, listClasses)
    testingNB()
    spamTest()




