Machine Learning in Action with Python, Part 3: Naive Bayes Classifier


1. A Brief Introduction to Naive Bayes

First question: why is it called "naive"? Because the whole formalization makes only the most primitive, simplest assumption: that every feature (here, every word) is conditionally independent of the others given the class. Strengths of naive Bayes: it remains effective even with little data, and it can handle multi-class problems. Its weakness is that it is rather sensitive to how the input data is prepared. Applicable data type: nominal values. There are already plenty of introductions to Bayesian theory online, so I won't rehash them here.
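To make that assumption concrete (this is standard naive Bayes material, not something specific to this post): for a document made of words w1, ..., wn and a class c, Bayes' rule plus the independence assumption give

    P(c | w1, ..., wn) ∝ P(c) · P(w1 | c) · P(w2 | c) · ... · P(wn | c)

Each factor on the right is tiny, so multiplying dozens of them underflows floating point. The code below therefore compares log P(c) + log P(w1 | c) + ... + log P(wn | c) between the two classes instead; that is exactly what the log() calls in trainNB0 and the sums in classifyNB implement.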
I'm sharing the original code and data set; interested readers can download them below. The code runs fine under Python 2.7. Under Python 3.x some parts will raise errors (most notably the print statements and dict.iteritems), which readers will have to fix on their own. Link: https://pan.baidu.com/s/1c228QTM Password: 35yf

2. Code Implementation

#coding:utf-8
# import everything from numpy (array, ones, log, random are used below)
from numpy import *

# Build the toy data set and its labels: six posts from a dog-lovers
# message board, each tagged 1 (abusive) or 0 (not abusive)
def loadDataSet():
    postingList = [['my', 'dog', 'has', 'flea', 'problems', 'help', 'please'],
                   ['maybe', 'not', 'take', 'him', 'to', 'dog', 'park', 'stupid'],
                   ['my', 'dalmation', 'is', 'so', 'cute', 'I', 'love', 'him'],
                   ['stop', 'posting', 'stupid', 'worthless', 'garbage'],
                   ['mr', 'licks', 'ate', 'my', 'steak', 'how', 'to', 'stop', 'him'],
                   ['quit', 'buying', 'worthless', 'dog', 'food', 'stupid']]
    classVec = [0, 1, 0, 1, 0, 1]    # 1 is abusive, 0 not
    return postingList, classVec

# Build the vocabulary: the union of all words seen in the data set
def createVocabList(dataSet):
    vocabSet = set([])    # create an empty set
    for document in dataSet:
        vocabSet = vocabSet | set(document)    # the | operator takes the union of two sets
    return list(vocabSet)

# Set-of-words model: the output vector is as long as the vocabulary,
# with a 1 in every slot whose word appears in the input document
def setOfWords2Vec(vocabList, inputSet):
    returnVec = [0]*len(vocabList)
    for word in inputSet:
        if word in vocabList:
            returnVec[vocabList.index(word)] = 1
        else:
            print "the word: %s is not in my Vocabulary!" % word
    return returnVec

# Naive Bayes training function: estimate P(word|class) for every word
def trainNB0(trainMatrix, trainCategory):
    numTrainDocs = len(trainMatrix)
    numWords = len(trainMatrix[0])
    pAbusive = sum(trainCategory)/float(numTrainDocs)
    # initialize counts to 1 and denominators to 2.0 (Laplace smoothing),
    # so an unseen word cannot zero out an entire product of probabilities
    p0Num = ones(numWords); p1Num = ones(numWords)
    p0Denom = 2.0; p1Denom = 2.0
    for i in range(numTrainDocs):
        if trainCategory[i] == 1:
            p1Num += trainMatrix[i]
            p1Denom += sum(trainMatrix[i])
        else:
            p0Num += trainMatrix[i]
            p0Denom += sum(trainMatrix[i])
    # take logs so that multiplying many small probabilities becomes a sum
    # and cannot underflow to 0.0
    p1Vect = log(p1Num/p1Denom)
    p0Vect = log(p0Num/p0Denom)
    return p0Vect, p1Vect, pAbusive

# Naive Bayes classification function: compare the two log-posteriors
def classifyNB(vec2Classify, p0Vec, p1Vec, pClass1):
    p1 = sum(vec2Classify * p1Vec) + log(pClass1)    # element-wise mult
    p0 = sum(vec2Classify * p0Vec) + log(1.0 - pClass1)
    if p1 > p0:
        return 1
    else:
        return 0

# Bag-of-words model: each occurrence of a word increments its slot in the
# vector instead of just setting it to 1 as the set-of-words model does
def bagOfWords2VecMN(vocabList, inputSet):
    returnVec = [0]*len(vocabList)
    for word in inputSet:
        if word in vocabList:
            returnVec[vocabList.index(word)] += 1
    return returnVec

# Convenience function: train on the toy data and classify two new posts
def testingNB():
    listOPosts, listClasses = loadDataSet()
    myVocabList = createVocabList(listOPosts)
    trainMat = []
    for postinDoc in listOPosts:
        trainMat.append(setOfWords2Vec(myVocabList, postinDoc))
    p0V, p1V, pAb = trainNB0(array(trainMat), array(listClasses))
    testEntry = ['love', 'my', 'dalmation']
    thisDoc = array(setOfWords2Vec(myVocabList, testEntry))
    print testEntry, 'classified as: ', classifyNB(thisDoc, p0V, p1V, pAb)
    testEntry = ['stupid', 'garbage']
    thisDoc = array(setOfWords2Vec(myVocabList, testEntry))
    print testEntry, 'classified as: ', classifyNB(thisDoc, p0V, p1V, pAb)

# Tokenizer: split a big string into lowercase words of length > 2
def textParse(bigString):
    import re
    # \W+ (one or more non-word characters); the original \W* emits empty
    # tokens and misbehaves on newer Python versions
    listOfTokens = re.split(r'\W+', bigString)
    return [tok.lower() for tok in listOfTokens if len(tok) > 2]
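With only the functions so far, the classifier can already be smoke-tested on the toy data. A minimal session, assuming the code was saved as bayes.py (the file name is my choice); the output is deterministic because the training set is fixed:

>>> import bayes
>>> bayes.testingNB()
['love', 'my', 'dalmation'] classified as:  0
['stupid', 'garbage'] classified as:  1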
# Automated test of the naive Bayes spam classifier
def spamTest():
    docList = []; classList = []; fullText = []
    for i in range(1, 26):    # 25 spam and 25 ham emails
        wordList = textParse(open('email/spam/%d.txt' % i).read())
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(1)
        wordList = textParse(open('email/ham/%d.txt' % i).read())
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(0)
    vocabList = createVocabList(docList)    # create the vocabulary
    # hold out 10 randomly chosen documents as the test set
    trainingSet = range(50); testSet = []
    for i in range(10):
        randIndex = int(random.uniform(0, len(trainingSet)))
        testSet.append(trainingSet[randIndex])
        del(trainingSet[randIndex])
    trainMat = []; trainClasses = []
    for docIndex in trainingSet:    # train the classifier with trainNB0
        trainMat.append(bagOfWords2VecMN(vocabList, docList[docIndex]))
        trainClasses.append(classList[docIndex])
    p0V, p1V, pSpam = trainNB0(array(trainMat), array(trainClasses))
    errorCount = 0
    for docIndex in testSet:    # classify the held-out documents
        wordVector = bagOfWords2VecMN(vocabList, docList[docIndex])
        if classifyNB(array(wordVector), p0V, p1V, pSpam) != classList[docIndex]:
            errorCount += 1
            print "classification error", docList[docIndex]
    print 'the error rate is: ', float(errorCount)/len(testSet)
    #return vocabList, fullText

# RSS feed classifier and frequent-word removal:
# count each vocabulary word's occurrences in the full text
# and return the 30 most frequent ones
def calcMostFreq(vocabList, fullText):
    import operator
    freqDict = {}
    for token in vocabList:
        freqDict[token] = fullText.count(token)
    sortedFreq = sorted(freqDict.iteritems(), key=operator.itemgetter(1), reverse=True)
    return sortedFreq[:30]

def localWords(feed1, feed0):
    docList = []; classList = []; fullText = []
    minLen = min(len(feed1['entries']), len(feed0['entries']))
    for i in range(minLen):
        wordList = textParse(feed1['entries'][i]['summary'])
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(1)    # NY is class 1
        wordList = textParse(feed0['entries'][i]['summary'])
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(0)
    vocabList = createVocabList(docList)    # create the vocabulary
    # remove the 30 most frequent words: mostly stop words that carry
    # no class information yet dominate the counts
    top30Words = calcMostFreq(vocabList, fullText)
    for pairW in top30Words:
        if pairW[0] in vocabList: vocabList.remove(pairW[0])
    trainingSet = range(2*minLen); testSet = []    # hold out 20 random entries
    for i in range(20):
        randIndex = int(random.uniform(0, len(trainingSet)))
        testSet.append(trainingSet[randIndex])
        del(trainingSet[randIndex])
    trainMat = []; trainClasses = []
    for docIndex in trainingSet:    # train the classifier with trainNB0
        trainMat.append(bagOfWords2VecMN(vocabList, docList[docIndex]))
        trainClasses.append(classList[docIndex])
    p0V, p1V, pSpam = trainNB0(array(trainMat), array(trainClasses))
    errorCount = 0
    for docIndex in testSet:    # classify the held-out entries
        wordVector = bagOfWords2VecMN(vocabList, docList[docIndex])
        if classifyNB(array(wordVector), p0V, p1V, pSpam) != classList[docIndex]:
            errorCount += 1
    print 'the error rate is: ', float(errorCount)/len(testSet)
    return vocabList, p0V, p1V
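To try the spam filter, the email/spam and email/ham folders from the download above (25 messages each) have to sit in the working directory. Because the 10-document test set is drawn at random, the printed error rate fluctuates between runs, so it is worth calling it a few times and averaging the results by eye:

>>> bayes.spamTest()    # prints any misclassified documents, then the error rate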
# Display the most characteristic words of each feed: keep every word whose
# log-probability clears the -6.0 cutoff, then print them by descending probability
def getTopWords(ny, sf):
    vocabList, p0V, p1V = localWords(ny, sf)
    topNY = []; topSF = []
    for i in range(len(p0V)):
        if p0V[i] > -6.0: topSF.append((vocabList[i], p0V[i]))
        if p1V[i] > -6.0: topNY.append((vocabList[i], p1V[i]))
    sortedSF = sorted(topSF, key=lambda pair: pair[1], reverse=True)
    print "SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**"
    for item in sortedSF:
        print item[0]
    sortedNY = sorted(topNY, key=lambda pair: pair[1], reverse=True)
    print "NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**"
    for item in sortedNY:
        print item[0]
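Driving the RSS experiment needs the third-party feedparser package (pip install feedparser). Here is a sketch using the two craigslist feeds from the original Machine Learning in Action book; those URLs may well no longer serve RSS today, and any pair of feeds whose entries carry a 'summary' field will do:

import feedparser
ny = feedparser.parse('http://newyork.craigslist.org/stp/index.rss')
sf = feedparser.parse('http://sfbay.craigslist.org/stp/index.rss')
vocabList, pSF, pNY = localWords(ny, sf)    # trains once and prints the error rate
getTopWords(ny, sf)                         # trains again, then prints each feed's signature words

The -6.0 in getTopWords is simply a cutoff on the log-probabilities; raising it (say, to -5.0) keeps fewer but more distinctive words.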

Ha ha, all done, perfect! O(∩_∩)O
