TensorFlow Practice Series -- Linear Regression (Batch and Stochastic Gradient Descent)

import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt

sess = tf.Session()

# data: x ~ N(1, 0.1), target y is the constant 10
x_vals = np.random.normal(1, 0.1, 100).reshape([100, 1])
y_vals = np.repeat(10.0, 100).reshape([100, 1])

# model placeholders and parameters
x_data = tf.placeholder(dtype=tf.float32, shape=[None, 1])
y_target = tf.placeholder(dtype=tf.float32, shape=[None, 1])
w = tf.Variable(tf.random_normal(shape=[1, 1]))
b = tf.Variable(tf.random_normal(shape=[1, 1]))

# linear model and mean-squared-error loss
y_hat = tf.add(tf.matmul(x_data, w), b)
loss = tf.reduce_mean(tf.square(y_hat - y_target))

# initialize variables
init = tf.global_variables_initializer()
sess.run(init)

# optimizer
my_opt = tf.train.GradientDescentOptimizer(0.02)
train_step = my_opt.minimize(loss)

# batch gradient descent: each step feeds all 100 samples
batch_cache = []
for i in range(100):
    sess.run(train_step, feed_dict={x_data: x_vals, y_target: y_vals})
    if (i + 1) % 5 == 0:
        print('w: ' + str(sess.run(w)))
        print('b: ' + str(sess.run(b)))
        tmploss = sess.run(loss, feed_dict={x_data: x_vals, y_target: y_vals})
        print('loss: ' + str(tmploss))
        batch_cache.append(tmploss)
plt.plot(range(0, 100, 5), batch_cache, 'b-', label='Batch Loss')

# stochastic gradient descent: each step feeds one randomly chosen sample
stochastic_cache = []
for i in range(100):
    rand_index = np.random.choice(100)
    rand_x = [x_vals[rand_index]]
    rand_y = [y_vals[rand_index]]
    sess.run(train_step, feed_dict={x_data: rand_x, y_target: rand_y})
    if i % 5 == 0:
        print('w: ' + str(sess.run(w)))
        print('b: ' + str(sess.run(b)))
        tmploss = sess.run(loss, feed_dict={x_data: rand_x, y_target: rand_y})
        print('loss: ' + str(tmploss))
        stochastic_cache.append(tmploss)
print("===over===")
# red line so the two curves are distinguishable (the original drew both in blue,
# and never called plt.legend(), so the labels were not shown)
plt.plot(range(0, 100, 5), stochastic_cache, 'r-', label='Stochastic Loss')
plt.legend()
plt.show()

# prediction for a new input x = 1.05
print(1.05 * sess.run(w) + sess.run(b))
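Note that the stochastic loop above only prints the loss on the single sampled point, which is itself noisy. A fairer end-of-training check is to evaluate the loss over all 100 points. This snippet is not part of the original script; it is a minimal sketch that assumes the session, placeholders, and data arrays defined above are still in scope:

# evaluate the trained model on the full dataset
final_loss = sess.run(loss, feed_dict={x_data: x_vals, y_target: y_vals})
print('final full-batch loss: ' + str(final_loss))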

[Figure: batch gradient descent loss curve]

[Figure: stochastic gradient descent loss curve]

As the two loss curves clearly show, stochastic gradient descent oscillates far more than batch gradient descent: each SGD update is driven by a single randomly drawn sample, while each batch update averages the gradient over all 100 samples.
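To see numerically where the oscillation comes from, one can compare the per-sample gradients against their full-batch average. The following is a minimal NumPy sketch, not part of the original script; the parameter values w = 5.0 and b = 2.0 are arbitrary "mid-training" assumptions, while the data matches the script above (x ~ N(1, 0.1), y = 10):

import numpy as np

np.random.seed(0)
x = np.random.normal(1, 0.1, 100)
y = np.repeat(10.0, 100)
w, b = 5.0, 2.0  # hypothetical mid-training parameter values (assumption)

# per-sample gradient of (w*x_i + b - y_i)^2 with respect to w
per_sample_grad_w = 2 * (w * x + b - y) * x

print('full-batch gradient (mean):', per_sample_grad_w.mean())
print('std of single-sample gradients:', per_sample_grad_w.std())

A batch step always moves along the printed mean, while an SGD step moves along one randomly chosen per-sample gradient; the printed standard deviation is the noise that shows up as oscillation in the stochastic loss curve.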

