【TensorFlow 1.0 Study Notes 003】Building a Neural Network and Visualizing the Results


This note explains everything through two code examples: first building and training the network, then visualizing the fit as training progresses.

Code for building the neural network:

"""Please note, this code is only for python 3+. If you are using python 2+, please modify the code accordingly."""import tensorflow as tfimport numpy as npimport matplotlib.pyplot as pltdef add_layer(inputs, in_size, out_size, activation_function=None):    Weights = tf.Variable(tf.random_normal([in_size, out_size]))    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)    Wx_plus_b = tf.matmul(inputs, Weights) + biases    if activation_function is None:        outputs = Wx_plus_b    else:        outputs = activation_function(Wx_plus_b)    return outputs# Make up some real datax_data = np.linspace(-1, 1, 300)[:, np.newaxis]noise = np.random.normal(0, 0.05, x_data.shape)y_data = np.square(x_data) - 0.5 + noise##plt.scatter(x_data, y_data)##plt.show()# define placeholder for inputs to networkxs = tf.placeholder(tf.float32, [None, 1])ys = tf.placeholder(tf.float32, [None, 1])# add hidden layerl1 = add_layer(xs, 1, 10, activation_function=tf.nn.relu)# add output layerprediction = add_layer(l1, 10, 1, activation_function=None)# the error between prediciton and real dataloss = tf.reduce_mean(tf.reduce_sum(tf.square(ys-prediction), reduction_indices=[1]))train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)# important stepinit = tf.global_variables_initializer()sess= tf.Session()sess.run(init)for i in range(1000):    # training    sess.run(train_step, feed_dict={xs: x_data, ys: y_data})    if i % 50 == 0:        # to see the step improvement        print(sess.run(loss, feed_dict={xs: x_data, ys: y_data}))


Visualizing the results:

""" Please note, this code is only for python 3+. If you are using python 2+, please modify the code accordingly. """  import tensorflow as tf  import numpy as np  import matplotlib.pyplot as plt    def add_layer(inputs, in_size, out_size, activation_function=None):      Weights = tf.Variable(tf.random_normal([in_size, out_size]))      biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)      Wx_plus_b = tf.matmul(inputs, Weights) + biases      if activation_function is None:          outputs = Wx_plus_b      else:          outputs = activation_function(Wx_plus_b)      return outputs    # Make up some real data  x_data = np.linspace(-1, 1, 300)[:, np.newaxis]  noise = np.random.normal(0, 0.05, x_data.shape)  y_data = np.square(x_data) - 0.5 + noise    ##plt.scatter(x_data, y_data)  ##plt.show()    # define placeholder for inputs to network  xs = tf.placeholder(tf.float32, [None, 1])  ys = tf.placeholder(tf.float32, [None, 1])  # add hidden layer  l1 = add_layer(xs, 1, 10, activation_function=tf.nn.relu)  # add output layer  prediction = add_layer(l1, 10, 1, activation_function=None)    # the error between prediciton and real data  loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys-prediction), reduction_indices=[1]))  train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)  # important step  init = tf.initialize_all_variables()  sess= tf.Session()  sess.run(init)    # plot the real data  fig = plt.figure()  ax = fig.add_subplot(1,1,1)  ax.scatter(x_data, y_data)  plt.ion()  plt.show()     for i in range(1000):      # training      sess.run(train_step, feed_dict={xs: x_data, ys: y_data})      if i % 50 == 0:          # to visualize the result and improvement          try:              ax.lines.remove(lines[0])          except Exception:              pass          prediction_value = sess.run(prediction, feed_dict={xs: x_data})          # plot the prediction          lines = ax.plot(x_data, prediction_value, 'r-', lw=5)          plt.pause(1) 


