Batch Gradient Descent (Python)

```python
import numpy as np

def GradientDescent(x, y, theta):
    """One batch gradient descent step on the squared-error cost."""
    m, n = x.shape  # m: number of training examples, n: number of features
    new_theta = theta.copy()
    for j in range(n):
        # learning rate: 0.03; every theta[j] is updated from the same (old) theta
        new_theta[j] = theta[j] + 0.03 / m * np.sum(
            [(y[i] - np.matmul(x[i, :], theta)) * x[i, j] for i in range(m)])
    return new_theta

# toy data: y = 2*x + 3
x = np.array([[1], [2], [3], [4], [5], [6]])
y = np.array([5, 7, 9, 11, 13, 15])

# stopping condition on the sum of squared errors
epsilon = 0.01

# add the bias column x0 = 1 to the data
x1 = np.hstack((np.ones((6, 1)), x))
theta = np.zeros((2, 1))
print(x1.shape)

while True:
    theta = GradientDescent(x1, y, theta)
    prediction = np.matmul(x1, theta)
    loss = np.sum((prediction.T - y) ** 2)
    if loss < epsilon:
        break

print('prediction=', prediction.T)
print('y=', y)
print('loss=', loss)
```
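
The per-feature loop above can also be written as a single matrix expression, since the batch gradient of the squared-error cost is X^T (y - X theta) / m. The sketch below is an illustrative vectorized variant, not part of the original post; the function name batch_gradient_step is hypothetical, and it assumes the same toy data and the same learning rate of 0.03.

```python
import numpy as np

def batch_gradient_step(x, y, theta, lr=0.03):
    """One vectorized batch gradient descent step: theta + lr/m * X^T (y - X theta)."""
    m = x.shape[0]
    error = y - x @ theta              # residuals for all m examples at once, shape (m,)
    return theta + lr / m * (x.T @ error)

# same toy data as above: y = 2*x + 3, with a bias column x0 = 1
x = np.hstack((np.ones((6, 1)), np.arange(1, 7).reshape(6, 1)))
y = np.array([5, 7, 9, 11, 13, 15], dtype=float)

theta = np.zeros(2)                    # flat (n,) vector avoids broadcasting surprises
while np.sum((x @ theta - y) ** 2) >= 0.01:
    theta = batch_gradient_step(x, y, theta)

print('theta =', theta)                # should approach [3, 2]
```

Updating all components of theta from one matrix product also avoids the subtlety in the loop version, where each theta[j] must be computed from the previous iterate rather than from components already updated in the same pass.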
