Linear Regression and Gradient Descent


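Both descent routines in the script below minimize the squared error of a linear model fitted to the ±1 labels. For one sample (x_j, y_j) the loss and its gradients are

L = \frac{1}{2}\,(w^\top x_j + b - y_j)^2, \qquad
\frac{\partial L}{\partial w} = (w^\top x_j + b - y_j)\,x_j, \qquad
\frac{\partial L}{\partial b} = w^\top x_j + b - y_j

so a gradient step with learning rate \eta is

w \leftarrow w - \eta\,(w^\top x_j + b - y_j)\,x_j, \qquad
b \leftarrow b - \eta\,(w^\top x_j + b - y_j).

The batch routine sums this gradient over all n samples and uses \eta = 0.01; the stochastic routine applies it to one randomly chosen sample per iteration with an implicit \eta = 1.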
# -*- coding: utf-8 -*-
import numpy as np
import mkdata as mk
import matplotlib.pyplot as plt
import random

N = 100           # number of training samples
iterNums = 10000  # number of gradient-descent iterations

# Feature map: expand the attributes (x1, x2) to (x1, x2, x1*x2, x1*x1, x2*x2)
def kernel(X, y):
    m, n = X.shape
    X_copy = np.zeros((m + 3, n))
    X_copy[0] = X[0]
    X_copy[1] = X[1]
    for i in range(n):
        X_copy[2][i] = X[0][i] * X[1][i]
        X_copy[3][i] = X[0][i] * X[0][i]
        X_copy[4][i] = X[1][i] * X[1][i]
    return X_copy

# Stochastic gradient descent: one randomly chosen sample per update
# (implicit step size of 1; the batch version below uses 0.01)
def gradientDescent_stochastic(X, y):
    m, n = X.shape
    w = np.zeros(m)
    b = 0.0
    for i in range(iterNums):
        j = random.choice(range(n))
        err = np.dot(w, X[:, j]) + b - y[0][j]  # prediction error on sample j
        w = w - err * X[:, j]
        b = b - err
    return w, b

# Batch gradient descent: accumulate the gradient over all samples per update
def gradientDescent_batch(X, y):
    m, n = X.shape
    w = np.zeros(m)
    b = 0.0
    for i in range(iterNums):
        diff = np.zeros(m)
        bb = 0.0
        for j in range(n):
            err = np.dot(w, X[:, j]) + b - y[0][j]
            diff += err * X[:, j]
            bb += err
        w = w - 0.01 * diff
        b = b - 0.01 * bb
    return w, b

if __name__ == "__main__":
    X, y, w = mk.mk_data(N)
    theta, bias = gradientDescent_batch(X, y)
    # theta, bias = gradientDescent_stochastic(X, y)

    # plot the two classes and the learned boundary theta[0]*x1 + theta[1]*x2 + bias = 0
    plt.scatter(X[0, y[0] == 1], X[1, y[0] == 1], color='red')
    plt.scatter(X[0, y[0] == -1], X[1, y[0] == -1], color='g')
    x = np.arange(-2, 2, 0.1)
    x2 = (-bias - theta[0] * x) / theta[1]
    plt.plot(x, x2)
    plt.show()
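The script imports a helper module mkdata that is not included in the post. A minimal stand-in sketch, assuming mk_data(N) returns 2-D points of shape (2, N), labels of shape (1, N) in {-1, +1}, and the true weights of the separating line (shapes and return values inferred from how the script indexes them), could look like this:

import numpy as np

def mk_data(N):
    """Hypothetical replacement for mkdata.mk_data: N linearly separable 2-D points."""
    X = np.random.uniform(-1, 1, (2, N))            # points in [-1, 1]^2
    w = np.random.uniform(-1, 1, 3)                 # random true boundary [b, w1, w2]
    y = np.sign(w[0] + w[1] * X[0] + w[2] * X[1])   # label by which side of the line
    y[y == 0] = 1                                   # avoid zero labels
    return X, y.reshape(1, N), w

Saved as mkdata.py next to the script, a generator like this should let it run end to end and plot the two classes together with the fitted boundary.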

