机器学习实战,第三章,临时代码

来源:互联网 发布:2016gdp 知乎 编辑:程序博客网 时间:2024/05/16 11:31
# coding=utf-8
# ID3 decision-tree helpers (Machine Learning in Action, ch. 3):
# Shannon entropy, dataset splitting, and best-feature selection.
from math import log


def calcShannonEnt(dataSet):
    """Return the Shannon entropy (in bits) of the class labels in dataSet.

    dataSet: list of records; the LAST element of each record is the class
    label. Returns 0.0 entropy for a single-class dataset.
    """
    numEntries = len(dataSet)
    labelCounts = {}
    # Count occurrences of each class label.
    for featVec in dataSet:
        currentLabel = featVec[-1]
        if currentLabel not in labelCounts:
            labelCounts[currentLabel] = 0
        # NOTE(review): in the scraped one-liner this increment appeared to sit
        # inside the `if`, which would force every count to 1 and break the
        # entropy. It must run for every record, as in the original book code.
        labelCounts[currentLabel] += 1
    shannonEnt = 0.0  # Shannon entropy accumulator
    for key in labelCounts:
        prob = float(labelCounts[key]) / numEntries
        shannonEnt -= prob * log(prob, 2)  # log base 2 -> entropy in bits
    return shannonEnt


def createDataSet():
    """Return a tiny demo (dataSet, labels) pair for exercising the helpers.

    Each record is [feature0, feature1, classLabel]; `labels` names the two
    feature columns.
    """
    dataSet = [[1, 1, 'yes'],
               [1, 1, 'yes'],
               [1, 0, 'no'],
               [0, 1, 'no'],
               [0, 1, 'no']]
    labels = ['no surfacing', 'flippers']
    return dataSet, labels


def splitDataSet(dataSet, axis, value):
    """Return the records whose feature at index `axis` equals `value`,
    with that feature column removed from each returned record.

    A fresh list is built (and each kept record is copied) so the caller's
    dataSet is never mutated — Python passes the list by reference.
    """
    retDataSet = []
    for featVec in dataSet:
        # Keep only records matching the requested feature value.
        if featVec[axis] == value:
            # Copy the record minus the split feature.
            reducedFeatVec = featVec[:axis]
            reducedFeatVec.extend(featVec[axis + 1:])
            retDataSet.append(reducedFeatVec)
    return retDataSet


def chooseBestFeatureToSplit(dataSet):
    """Return the index of the feature with the highest information gain,
    or -1 when no split yields a strictly positive gain.

    Information gain = base entropy - expected entropy after the split.
    """
    numFeatures = len(dataSet[0]) - 1  # last column is the class label
    baseEntropy = calcShannonEnt(dataSet)
    bestInfoGain = 0.0
    bestFeature = -1
    for i in range(numFeatures):
        # Unique values taken by feature i across the dataset.
        featList = [example[i] for example in dataSet]
        uniqueVals = set(featList)
        newEntropy = 0.0
        # Expected (weighted) entropy after splitting on feature i.
        for value in uniqueVals:
            subDataSet = splitDataSet(dataSet, i, value)
            prob = len(subDataSet) / float(len(dataSet))
            newEntropy += prob * calcShannonEnt(subDataSet)
        infoGain = baseEntropy - newEntropy
        # Track the feature with the best information gain so far.
        if infoGain > bestInfoGain:
            bestInfoGain = infoGain
            bestFeature = i
    return bestFeature

0 0
原创粉丝点击