Machine Learning in Action, Chapter 4: Naive Bayes

>>> import bayes
>>> listOPosts,listClasses=bayes.loadDataSet()
>>> listOPosts
[['my', 'dog', 'has', 'flea', 'problems', 'help', 'please'], ['maybe', 'not', 'take', 'him', 'to', 'dog', 'park', 'stupid'], ['my', 'dalmation', 'is', 'so', 'cute', 'I', 'love', 'him'], ['stop', 'posting', 'stupid', 'worthless', 'garbage'], ['mr', 'licks', 'ate', 'my', 'steak', 'how', 'to', 'stop', 'him'], ['quit', 'buying', 'worthless', 'dog', 'food', 'stupid']]
>>> listClasses
[0, 1, 0, 1, 0, 1]
>>> myVocabList=bayes.createVocabList(listOPosts)
>>> myVocabList
['cute', 'love', 'help', 'garbage', 'quit', 'I', 'problems', 'is', 'park', 'stop', 'flea', 'dalmation', 'licks', 'food', 'not', 'him', 'buying', 'posting', 'has', 'worthless', 'ate', 'to', 'maybe', 'please', 'dog', 'how', 'stupid', 'so', 'take', 'mr', 'steak', 'my']
>>> bayes.setOfWords2Vec(myVocabList,listOPosts[0])
[0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1]
>>> trainMat=[]
>>> for postinDoc in listOPosts:
...     trainMat.append(bayes.setOfWords2Vec(myVocabList,postinDoc))
...
>>> trainMat
[[0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0], [1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1], [0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0]]

In the session above, listOPosts holds six posts, each already split into words. listClasses records whether each post is abusive or not: the first post, "my dog has flea problems help please", is non-abusive, so it is labeled 0. myVocabList is the set of all distinct words across the six posts. setOfWords2Vec re-expresses a post in terms of myVocabList: since listOPosts[0] contains the word "help", the corresponding slot myVocabList[2] maps to 1 in the output vector. Finally, trainMat is all six posts expressed as such vectors over myVocabList.
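For reference, here is a minimal sketch of the two helper functions used above, written to match the behavior shown in the session (the book's bayes.py defines them along these lines):

def createVocabList(dataSet):
    # union of all words across all documents
    vocabSet = set([])
    for document in dataSet:
        vocabSet = vocabSet | set(document)
    return list(vocabSet)

def setOfWords2Vec(vocabList, inputSet):
    # set-of-words model: mark each vocabulary word as present (1) or absent (0)
    returnVec = [0]*len(vocabList)
    for word in inputSet:
        if word in vocabList:
            returnVec[vocabList.index(word)] = 1
    return returnVec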

from numpy import *    # provides zeros(), ones(), array(), log()

def trainNB0(trainMatrix,trainCategory):
    numTrainDocs = len(trainMatrix)
    numWords = len(trainMatrix[0])
    pAbusive = sum(trainCategory)/float(numTrainDocs)
    p0Num = zeros(numWords); p1Num = zeros(numWords)    #change to ones()
    p0Denom = 0; p1Denom = 0                            #change to 2.0
    for i in range(numTrainDocs):
        if trainCategory[i] == 1:
            p1Num += trainMatrix[i]
            p1Denom += sum(trainMatrix[i])
        else:
            p0Num += trainMatrix[i]
            p0Denom += sum(trainMatrix[i])
    p1Vect = p1Num/p1Denom          #change to log()
    p0Vect = p0Num/p0Denom          #change to log()
    return p0Vect,p1Vect,pAbusive
The function is called with the training matrix and class labels built above:
>>> p0V,p1V,pAb=bayes.trainNB0(trainMat,listClasses)
sum(trainCategory) is the number of abusive posts and float(numTrainDocs) is the total number of posts, so pAbusive is the fraction of abusive posts in the input, here 0.5. numWords is the size of the vocabulary. The for loop then processes one post at a time: p1Num is a vector of numWords counters, and whenever a post is abusive, its row of trainMatrix is added to p1Num; p0Num accumulates the rows of non-abusive posts in the same way. From p1V and p0V you can read off each word's share of all the words in abusive or non-abusive posts. For example, p1V[26] corresponds to "stupid" and equals 0.15789474: "stupid" occurs 3 times among the 19 tokens of the three abusive posts, and 3/19 ≈ 0.1579.

>>> p0V
array([ 0.04166667,  0.04166667,  0.04166667,  0.        ,  0.        ,
        0.04166667,  0.04166667,  0.04166667,  0.        ,  0.04166667,
        0.04166667,  0.04166667,  0.04166667,  0.        ,  0.        ,
        0.08333333,  0.        ,  0.        ,  0.04166667,  0.        ,
        0.04166667,  0.04166667,  0.        ,  0.04166667,  0.04166667,
        0.04166667,  0.        ,  0.04166667,  0.        ,  0.04166667,
        0.04166667,  0.125     ])
>>> sum(p0V)
0.99999999999999967
>>> p1V
array([ 0.        ,  0.        ,  0.        ,  0.05263158,  0.05263158,
        0.        ,  0.        ,  0.        ,  0.05263158,  0.05263158,
        0.        ,  0.        ,  0.        ,  0.05263158,  0.05263158,
        0.05263158,  0.05263158,  0.05263158,  0.        ,  0.10526316,
        0.        ,  0.05263158,  0.05263158,  0.        ,  0.10526316,
        0.        ,  0.15789474,  0.        ,  0.05263158,  0.        ,
        0.        ,  0.        ])
>>> sum(p1V)
0.99999999999999978
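Note that the p0V and p1V shown above come from the unmodified trainNB0: unseen words get probability exactly 0, which would wipe out any product they appear in, and multiplying many small probabilities underflows toward 0. The inline "change to" comments fix both problems; with those changes applied, the function becomes:

def trainNB0(trainMatrix,trainCategory):
    # smoothed, log-space version, with the three "change to" comments applied
    numTrainDocs = len(trainMatrix)
    numWords = len(trainMatrix[0])
    pAbusive = sum(trainCategory)/float(numTrainDocs)
    p0Num = ones(numWords); p1Num = ones(numWords)   # Laplace smoothing: no zero counts
    p0Denom = 2.0; p1Denom = 2.0
    for i in range(numTrainDocs):
        if trainCategory[i] == 1:
            p1Num += trainMatrix[i]
            p1Denom += sum(trainMatrix[i])
        else:
            p0Num += trainMatrix[i]
            p0Denom += sum(trainMatrix[i])
    p1Vect = log(p1Num/p1Denom)    # log-probabilities avoid floating-point underflow
    p0Vect = log(p0Num/p0Denom)
    return p0Vect,p1Vect,pAbusive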
def classifyNB(vec2Classify, p0Vec, p1Vec, pClass1):
    p1 = sum(vec2Classify * p1Vec) + log(pClass1)    #element-wise mult
    p0 = sum(vec2Classify * p0Vec) + log(1.0 - pClass1)
    if p1 > p0:
        return 1
    else:
        return 0

def testingNB():
    listOPosts,listClasses = loadDataSet()
    myVocabList = createVocabList(listOPosts)
    trainMat=[]
    for postinDoc in listOPosts:
        trainMat.append(setOfWords2Vec(myVocabList, postinDoc))
    p0V,p1V,pAb = trainNB0(array(trainMat),array(listClasses))
    testEntry = ['love', 'my', 'dalmation']
    thisDoc = array(setOfWords2Vec(myVocabList, testEntry))
    print testEntry,'classified as: ',classifyNB(thisDoc,p0V,p1V,pAb)
    testEntry = ['stupid', 'garbage']
    thisDoc = array(setOfWords2Vec(myVocabList, testEntry))
    print testEntry,'classified as: ',classifyNB(thisDoc,p0V,p1V,pAb)
testingNB() converts "love my dalmation" into a vector over myVocabList, scores it against p0V and p1V, and assigns whichever class scores higher. Note that classifyNB adds log(pClass1) to sum(vec2Classify * p1Vec), which treats p1Vec as a vector of log-probabilities; that is why the log-space version of trainNB0 above is needed. The sum picks out the log-probabilities of exactly the words present in the test document, so p1 is the log of the numerator of Bayes' rule for class 1, log P(w|c=1) + log P(c=1), and comparing p1 with p0 compares the two posteriors.
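Running the test routine with the smoothed, log-space trainNB0 in place should print something like the following (expected output, assuming bayes.py matches the listings above):

>>> bayes.testingNB()
['love', 'my', 'dalmation'] classified as:  0
['stupid', 'garbage'] classified as:  1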
bagOfWords2VecMN differs from setOfWords2Vec only in replacing

            returnVec[vocabList.index(word)] = 1

with

            returnVec[vocabList.index(word)] += 1

because an input document may contain the same word more than once, and the bag-of-words model counts occurrences rather than just noting presence; the full function is sketched below.
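For completeness, a minimal sketch of the bag-of-words variant, mirroring the structure of setOfWords2Vec shown earlier:

def bagOfWords2VecMN(vocabList, inputSet):
    # bag-of-words model: count how many times each vocabulary word occurs
    returnVec = [0]*len(vocabList)
    for word in inputSet:
        if word in vocabList:
            returnVec[vocabList.index(word)] += 1
    return returnVec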
def textParse(bigString):    #input is big string, #output is word list
    import re
    listOfTokens = re.split(r'\W*', bigString)
    return [tok.lower() for tok in listOfTokens if len(tok) > 2]

def spamTest():
    docList=[]; classList = []; fullText =[]
    for i in range(1,26):
        wordList = textParse(open('email/spam/%d.txt' % i).read())
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(1)
        wordList = textParse(open('email/ham/%d.txt' % i).read())
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(0)
    vocabList = createVocabList(docList)    #create vocabulary
    trainingSet = range(50); testSet=[]     #create test set
    for i in range(10):
        randIndex = int(random.uniform(0,len(trainingSet)))
        testSet.append(trainingSet[randIndex])
        del(trainingSet[randIndex])
    trainMat=[]; trainClasses = []
    for docIndex in trainingSet:    #train the classifier (get probs) trainNB0
        trainMat.append(bagOfWords2VecMN(vocabList, docList[docIndex]))
        trainClasses.append(classList[docIndex])
    p0V,p1V,pSpam = trainNB0(array(trainMat),array(trainClasses))
    errorCount = 0
    for docIndex in testSet:        #classify the remaining items
        wordVector = bagOfWords2VecMN(vocabList, docList[docIndex])
        if classifyNB(array(wordVector),p0V,p1V,pSpam) != classList[docIndex]:
            errorCount += 1
            print "classification error",docList[docIndex]
    print 'the error rate is: ',float(errorCount)/len(testSet)
    #return vocabList,fullText
The first function above, textParse, splits an email's text into word tokens, keeping only lowercased tokens longer than two characters. In spamTest, docList plays the role of the earlier listOPosts and classList the role of listClasses. The spam and ham folders hold 25 emails each; all 50 are tokenized into docList, with spam emails labeled 1 and ham emails labeled 0 in classList. vocabList collects every distinct token from the 50 emails. Then 10 of the 50 emails are drawn at random as the test set (testSet), leaving 40 in trainingSet. The training emails are turned into trainMat and trainClasses, from which trainNB0 produces p0V, p1V, and pSpam. Finally those three parameters classify the 10 held-out emails, and comparing the predicted labels (0 or 1) against the known ones gives the error rate.
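As a quick illustration of textParse (hypothetical input string; output as produced under Python 2, where re.split skips the zero-width matches that the pattern r'\W*' allows — on Python 3 the safer pattern is r'\W+'):

>>> textParse('Hello, World!! This is a TEST email...')
['hello', 'world', 'this', 'test', 'email']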
>>> ny=feedparser.parse('http://newyork.craigslist.org/search/stp/index.rss')
>>> sf=feedparser.parse('http://sfbay.craigslist.org/search/stp/index.rss')
>>> sf['entries']
[]
>>> ny['entries']
[]
>>> import bayes
>>> vocaList,pSF,pNY=bayes.localWords(ny,sf)
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
  File "bayes.py", line 142, in localWords
    testSet.append(trainingSet[randIndex])
IndexError: list index out of range

This run differs from the book's: the RSS feeds have apparently changed, and where the book gets 100 entries in ny['entries'], both feeds here return 0 entries. As a result the trainingSet built inside localWords is empty, and indexing into it raises the IndexError shown above.
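A minimal defensive sketch, assuming localWords follows the book's structure (feed1 and feed0 parameters, training set sized from the shorter feed): check for empty feeds up front so the failure is reported clearly instead of surfacing as an IndexError.

    # hypothetical guard at the top of localWords (names follow the book's code)
    minLen = min(len(feed1['entries']), len(feed0['entries']))
    if minLen == 0:
        raise ValueError('one of the RSS feeds returned no entries; '
                         'cannot build a training set')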




