使用Logistic回归进行分类(代码笔记)

来源:互联网 发布:广联达计价软件多少钱 编辑:程序博客网 时间:2024/06/05 01:10

基于Sigmoid函数Logistic回归的分类算法。

思想:使用梯度上升找到最优回归系数,相当于找到决策边界。再用数据特征和Logistic回归就能算出分类。

import numpy as np

"""Logistic-regression classification: fit a decision boundary with
stochastic gradient ascent, then classify by thresholding the sigmoid."""


def sigmoid(x):
    """Logistic function 1/(1 + e^-x); returns 0.0 on float overflow."""
    try:
        return 1.0 / (1 + np.exp(-x))
    # BUGFIX: was a bare `except:` that swallowed every exception;
    # only the overflow case (pure-Python floats, very negative x) is expected.
    except OverflowError:
        return 0.0


def stocGradAscent1(xMat, classLabels, numIter=150):
    """Stochastic gradient ascent for logistic regression.

    xMat        -- (m, n) array-like, one sample per row
    classLabels -- length-m sequence of 0/1 labels
    numIter     -- number of full passes over the data
    Returns the length-n weight vector.
    """
    m, n = np.shape(xMat)
    w = np.ones(n)
    for j in range(numIter):
        # BUGFIX: `range(m)` does not support `del` in Python 3;
        # materialize a list so sampled indices can be removed.
        dataIdx = list(range(m))
        for i in range(m):
            # Step size decays across iterations but never reaches 0.
            alpha = 4 / (1.0 + j + i) + 0.01
            # Pick a remaining sample uniformly at random to reduce
            # periodic oscillation of the weights.
            randIdx = int(np.random.uniform(0, len(dataIdx)))
            h = sigmoid(sum(xMat[dataIdx[randIdx]] * w))
            err = classLabels[dataIdx[randIdx]] - h
            w = w + alpha * err * xMat[dataIdx[randIdx]]
            del dataIdx[randIdx]
    return w


def classifyVector(x, w):
    """Classify sample x with weights w: 1.0 if sigmoid(x·w) > 0.5 else 0.0."""
    prob = sigmoid(sum(x * w))
    return 1.0 if prob > 0.5 else 0.0


def colicTest():
    """Train on the first 300 horse-colic samples, predict the remaining 68.

    Expects 'horseColic.txt': 27 space-separated features plus a class
    column (labels 1/2, remapped to 1/0).  Missing features ('?') are
    imputed as 0.  Returns (predicted labels, error rate).
    """
    allSet = []
    allLab = []
    # BUGFIX: use a context manager so the file handle is always closed.
    with open('horseColic.txt') as allData:
        for line in allData:
            currLine = line.strip().split(' ')
            # BUGFIX: compare with `==`, not `is` (identity vs. equality).
            lineArr = [0 if currLine[i] == '?' else float(currLine[i])
                       for i in range(27)]  # 27 features; column 28 is the class
            allSet.append(lineArr)
            # Remap label 2 -> 0.0 so the classes are {0, 1}.
            allLab.append(0.0 if int(currLine[27]) == 2 else float(currLine[27]))

    trainSet = np.array(allSet[:300])  # 368 samples: first 300 train, last 68 test
    trainLab = np.array(allLab[:300])
    testSet = np.array(allSet[300:])
    testLab = np.array(allLab[300:])
    trainW = stocGradAscent1(trainSet, trainLab, 500)  # fit weights by gradient ascent

    # ----------------------------- predict the test set ------------------------------
    predLab = []
    errCount = 0.0
    numTest = np.shape(testSet)[0]
    for i in range(numTest):
        # Classify once per sample (the original recomputed this three times).
        pred = classifyVector(testSet[i], trainW)
        predLab.append(pred)
        print(pred, testLab[i])
        # BUGFIX: compare values with `!=`, not `is not`.
        if int(pred) != int(testLab[i]):
            errCount += 1.0
    errRate = float(errCount) / numTest
    print('the error rate is: %f, error count:%f' % (errRate, errCount))
    return predLab, errRate



原创粉丝点击