Machine Learning in Action: Decision Trees
Machine Learning in Action: K-Nearest Neighbors (KNN) Classification
The previous chapter covered K-nearest-neighbor classification (see the link above); this chapter implements a decision tree in Python. Although KNN works well most of the time, it gives no intuitive picture of how a decision is reached, whereas a decision tree makes the classification visual, so the logic can be read at a glance (a very large tree is, of course, still hard to read). The implementation here only grows the tree and draws it; it does not prune.
Implementing the decision tree in Python

The listing below follows the book's chapter 3 code, lightly adapted so it runs under Python 3: dict.items() replaces the Python 2 iteritems(), next(iter(...)) fetches a dict's first key instead of keys()[0], and the pickle files are opened in binary mode.
## function to calculate the Shannon entropy of a dataset
from math import log
import operator

def calcShannonEnt(dataSet):
    numEntries = len(dataSet)
    labelCounts = {}
    for featVec in dataSet:
        currentLabel = featVec[-1]
        if currentLabel not in labelCounts:
            labelCounts[currentLabel] = 0
        labelCounts[currentLabel] += 1
    shannonEnt = 0.0
    for key in labelCounts:
        prob = float(labelCounts[key]) / numEntries
        shannonEnt -= prob * log(prob, 2)  # entropy H = -sum_i p_i * log2(p_i)
    return shannonEnt

def createDataSet():
    dataSet = [[1, 1, 'yes'], [1, 1, 'yes'], [1, 0, 'no'], [0, 1, 'no'], [0, 1, 'no']]
    labels = ['no surfacing', 'flippers']
    return dataSet, labels

## dataset splitting on a given feature
def splitDataSet(dataSet, axis, value):  # keep rows where feature `axis` equals `value`
    retDataSet = []
    for featVec in dataSet:
        if featVec[axis] == value:
            reducedFeatVec = featVec[:axis]
            reducedFeatVec.extend(featVec[axis+1:])  # extend(): see the earlier post 《python小函数(一)》
            retDataSet.append(reducedFeatVec)
    return retDataSet

## choosing the best feature to split on
def chooseFeature(dataSet):
    numFeatures = len(dataSet[0]) - 1
    baseEntropy = calcShannonEnt(dataSet)  # empirical entropy of the whole set
    bestInfoGain = 0.0
    bestFeature = -1
    for i in range(numFeatures):
        featList = [example[i] for example in dataSet]
        uniqueVals = set(featList)  # set(): see 《python小函数(一)》
        newEntropy = 0.0
        for value in uniqueVals:
            subDataSet = splitDataSet(dataSet, i, value)
            prob = len(subDataSet) / float(len(dataSet))
            newEntropy += prob * calcShannonEnt(subDataSet)  # empirical conditional entropy
        infoGain = baseEntropy - newEntropy  # information gain
        if infoGain > bestInfoGain:
            bestInfoGain = infoGain
            bestFeature = i  # remember the best splitting feature
    return bestFeature

def majorityCnt(classList):  # majority vote
    classCount = {}
    for vote in classList:
        if vote not in classCount:
            classCount[vote] = 0  # could also use classCount.get(vote, 0)
        classCount[vote] += 1
    sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1), reverse=True)
    return sortedClassCount[0][0]

## tree-building code
def createTree(dataSet, labels):
    classList = [example[-1] for example in dataSet]  # class labels live in the last column
    if classList.count(classList[0]) == len(classList):
        return classList[0]  # all examples agree: leaf
    if len(dataSet[0]) == 1:
        return majorityCnt(classList)  # no features left: majority vote
    bestFeat = chooseFeature(dataSet)
    bestFeatLabel = labels[bestFeat]
    myTree = {bestFeatLabel: {}}  # nested dicts form the tree
    del(labels[bestFeat])  # drop the used label; the recursion chooses among the rest
    featValues = [example[bestFeat] for example in dataSet]
    uniqueVals = set(featValues)
    for value in uniqueVals:
        subLabels = labels[:]
        myTree[bestFeatLabel][value] = createTree(splitDataSet(dataSet, bestFeat, value), subLabels)  # recursive call
    return myTree
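Before moving on to classification, it helps to sanity-check the functions above on the toy dataset. A minimal sketch; the expected values follow from the five samples (2 'yes', 3 'no'):

myDat, labels = createDataSet()
print(calcShannonEnt(myDat))        # -(2/5)*log2(2/5) - (3/5)*log2(3/5) ≈ 0.971
print(chooseFeature(myDat))         # 0: gain ≈ 0.420 for 'no surfacing' vs ≈ 0.171 for 'flippers'
print(createTree(myDat, labels[:])) # {'no surfacing': {0: 'no', 1: {'flippers': {0: 'no', 1: 'yes'}}}}

Note the labels[:] copy: createTree deletes entries from the label list it is given, so passing a copy keeps labels intact for the classify call later.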
## classification function for an existing decision tree
def classify(inputTree, featLabels, testVec):
    firstStr = next(iter(inputTree))  # root feature name
    secondDict = inputTree[firstStr]
    featIndex = featLabels.index(firstStr)
    classLabel = None
    for key in secondDict:
        if testVec[featIndex] == key:
            if isinstance(secondDict[key], dict):
                classLabel = classify(secondDict[key], featLabels, testVec)  # descend into the subtree
            else:
                classLabel = secondDict[key]  # leaf: this is the class
    return classLabel

## persisting the decision tree with pickle
def storeTree(inputTree, filename):
    import pickle
    with open(filename, 'wb') as fw:  # pickle needs binary mode
        pickle.dump(inputTree, fw)

def grabTree(filename):
    import pickle
    with open(filename, 'rb') as fr:
        return pickle.load(fr)

## plotting tree nodes with text annotations
import matplotlib.pyplot as plt

decisionNode = dict(boxstyle="sawtooth", fc="0.8")
leafNode = dict(boxstyle="round4", fc="0.8")
arrow_args = dict(arrowstyle="<-")

def plotNode(nodeTxt, centerPt, parentPt, nodeType):
    createPlot.ax1.annotate(nodeTxt, xy=parentPt, xycoords='axes fraction',
                            xytext=centerPt, textcoords='axes fraction',
                            va="center", ha="center", bbox=nodeType, arrowprops=arrow_args)

'''
# earlier, simpler version of createPlot, kept for reference
def createPlot():
    fig = plt.figure(1, facecolor='white')
    fig.clf()
    createPlot.ax1 = plt.subplot(111, frameon=False)
    plotNode('a decision node', (0.5, 0.1), (0.1, 0.5), decisionNode)
    plotNode('a leaf node', (0.8, 0.1), (0.3, 0.8), leafNode)
    plt.show()
'''

## identifying the number of leaves in a tree and the depth
def getNumLeafs(myTree):
    numLeafs = 0
    firstStr = next(iter(myTree))
    secondDict = myTree[firstStr]
    for key in secondDict:
        if isinstance(secondDict[key], dict):
            numLeafs += getNumLeafs(secondDict[key])
        else:
            numLeafs += 1
    return numLeafs

def getTreeDepth(myTree):
    maxDepth = 0
    firstStr = next(iter(myTree))
    secondDict = myTree[firstStr]
    for key in secondDict:
        if isinstance(secondDict[key], dict):
            thisDepth = 1 + getTreeDepth(secondDict[key])
        else:
            thisDepth = 1
        if thisDepth > maxDepth:
            maxDepth = thisDepth
    return maxDepth

def retrieveTree(i):  # two hard-coded trees for testing the plotting code
    listOfTrees = [{'no surfacing': {0: 'no', 1: {'flippers': {0: 'no', 1: 'yes'}}}},
                   {'no surfacing': {0: 'no', 1: {'flippers': {0: {'head': {0: 'no', 1: 'yes'}}, 1: 'no'}}}}]
    return listOfTrees[i]

## the plotTree function
def plotMidText(cntrPt, parentPt, txtString):  # label the branch between parent and child
    xMid = (parentPt[0] - cntrPt[0]) / 2.0 + cntrPt[0]
    yMid = (parentPt[1] - cntrPt[1]) / 2.0 + cntrPt[1]
    createPlot.ax1.text(xMid, yMid, txtString)

def plotTree(myTree, parentPt, nodeTxt):
    numLeafs = getNumLeafs(myTree)  # width of this subtree in leaves
    firstStr = next(iter(myTree))
    cntrPt = (plotTree.xOff + (1.0 + float(numLeafs)) / 2.0 / plotTree.totalW, plotTree.yOff)
    plotMidText(cntrPt, parentPt, nodeTxt)
    plotNode(firstStr, cntrPt, parentPt, decisionNode)
    secondDict = myTree[firstStr]
    plotTree.yOff = plotTree.yOff - 1.0 / plotTree.totalD  # move down one level
    for key in secondDict:
        if isinstance(secondDict[key], dict):
            plotTree(secondDict[key], cntrPt, str(key))
        else:
            plotTree.xOff = plotTree.xOff + 1.0 / plotTree.totalW
            plotNode(secondDict[key], (plotTree.xOff, plotTree.yOff), cntrPt, leafNode)
            plotMidText((plotTree.xOff, plotTree.yOff), cntrPt, str(key))
    plotTree.yOff = plotTree.yOff + 1.0 / plotTree.totalD  # move back up

def createPlot(inTree):
    fig = plt.figure(1, facecolor='white')
    fig.clf()
    axprops = dict(xticks=[], yticks=[])
    createPlot.ax1 = plt.subplot(111, frameon=False, **axprops)
    plotTree.totalW = float(getNumLeafs(inTree))
    plotTree.totalD = float(getTreeDepth(inTree))
    plotTree.xOff = -0.5 / plotTree.totalW
    plotTree.yOff = 1.0
    plotTree(inTree, (0.5, 1.0), '')
    plt.show()

## the contact-lens example: load the tab-separated data
fr = open(r'c:/Users/ll/Documents/lenses.txt')  # local path to the book's lenses.txt
lenses = [inst.strip().split('\t') for inst in fr.readlines()]
lensesLabels = ['age', 'prescript', 'astigmatic', 'tearRate']
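With a tree in hand, classify walks the nested dict from the root down, and the pickle helpers round-trip it through a file. A short usage sketch; the filename 'classifierStorage.txt' is arbitrary:

myDat, labels = createDataSet()
myTree = retrieveTree(0)
print(classify(myTree, labels, [1, 0]))  # 'no': has 'no surfacing' = 1 but 'flippers' = 0
print(classify(myTree, labels, [1, 1]))  # 'yes'
storeTree(myTree, 'classifierStorage.txt')
print(grabTree('classifierStorage.txt')) # the same dict, restored from disk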
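The layout in plotTree is driven by getNumLeafs and getTreeDepth, which size the canvas in leaf-widths and level-heights. Checking them against the two hard-coded test trees, then drawing one:

print(getNumLeafs(retrieveTree(0)), getTreeDepth(retrieveTree(0)))  # 3 2
print(getNumLeafs(retrieveTree(1)), getTreeDepth(retrieveTree(1)))  # 4 3
createPlot(retrieveTree(0))  # opens a matplotlib window with the annotated tree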
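The listing ends after merely loading the contact-lens data. Following the book's chapter 3 walk-through, the presumable last step is to grow the lenses tree and draw it; a sketch, assuming lenses.txt is the tab-separated data file that ships with the book's source code:

lensesTree = createTree(lenses, lensesLabels[:])  # pass a copy; createTree mutates the label list
print(lensesTree)
createPlot(lensesTree)  # the full contact-lens decision tree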