初学 Tensorflow (构造神经网络)

来源:互联网 发布:访客统计系统源码 编辑:程序博客网 时间:2024/06/05 15:56
# coding: utf-8
import tensorflow as tf
import numpy as np


def add_layer(inputs, in_size, out_size, activation_function=None):
    """Add a fully connected layer: outputs = activation(inputs @ W + b).

    Args:
        inputs: 2-D tensor of shape [batch, in_size].
        in_size: number of input units.
        out_size: number of output units.
        activation_function: optional activation callable; None means linear.

    Returns:
        The layer's output tensor of shape [batch, out_size].
    """
    # Weight matrix with in_size rows and out_size columns.
    weights = tf.Variable(tf.random_normal([in_size, out_size]))
    # Biases are recommended not to start at exactly 0, hence the +0.1 offset.
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
    outputs = tf.matmul(inputs, weights) + biases
    if activation_function is not None:
        outputs = activation_function(outputs)
    return outputs


# Build the training data: y = x^2 - 0.5 plus Gaussian noise.
x_data = np.linspace(-1, 1, 300, dtype='float32')[:, np.newaxis]
noise = np.random.normal(0, 0.05, x_data.shape)
y_data = np.square(x_data) - 0.5 + noise

# Placeholders for the inputs; None means any batch size is accepted.
xs = tf.placeholder(tf.float32, [None, 1])
ys = tf.placeholder(tf.float32, [None, 1])

# Hidden layer with 10 neurons.
# BUG FIX: the original built the layer from the numpy array x_data and the
# loss from y_data, leaving the xs/ys placeholders disconnected from the
# graph, so feed_dict had no effect. The graph must consume the placeholders.
l1 = add_layer(xs, 1, 10, activation_function=tf.nn.relu)
# Output layer (linear, 1 unit).
prediction = add_layer(l1, 10, 1, activation_function=None)

# Mean squared error between the fed labels and the prediction.
loss = tf.reduce_mean(tf.square(ys - prediction))
# Gradient descent with learning rate 0.1.
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)

for i in range(1000):
    sess.run(train_step, feed_dict={xs: x_data, ys: y_data})
    if i % 50 == 0:
        # Python 3 print call (original used the Python 2 print statement).
        print(sess.run(loss, feed_dict={xs: x_data, ys: y_data}))


使结果可视化:

# coding: utf-8
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt


def add_layer(inputs, in_size, out_size, activation_function=None):
    """Add a fully connected layer: outputs = activation(inputs @ W + b).

    Args:
        inputs: 2-D tensor of shape [batch, in_size].
        in_size: number of input units.
        out_size: number of output units.
        activation_function: optional activation callable; None means linear.

    Returns:
        The layer's output tensor of shape [batch, out_size].
    """
    # Weight matrix with in_size rows and out_size columns.
    weights = tf.Variable(tf.random_normal([in_size, out_size]))
    # Biases are recommended not to start at exactly 0, hence the +0.1 offset.
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
    outputs = tf.matmul(inputs, weights) + biases
    if activation_function is not None:
        outputs = activation_function(outputs)
    return outputs


# Build the training data: y = x^2 - 0.5 plus Gaussian noise.
x_data = np.linspace(-1, 1, 300, dtype='float32')[:, np.newaxis]
noise = np.random.normal(0, 0.05, x_data.shape)
y_data = np.square(x_data) - 0.5 + noise

# Placeholders for the inputs; None means any batch size is accepted.
xs = tf.placeholder(tf.float32, [None, 1])
ys = tf.placeholder(tf.float32, [None, 1])

# Hidden layer with 10 neurons.
# BUG FIX: the original built the layer from the numpy array x_data and the
# loss from y_data, leaving the xs/ys placeholders disconnected from the
# graph, so feed_dict had no effect. The graph must consume the placeholders.
l1 = add_layer(xs, 1, 10, activation_function=tf.nn.relu)
# Output layer (linear, 1 unit).
prediction = add_layer(l1, 10, 1, activation_function=None)

# Mean squared error between the fed labels and the prediction.
loss = tf.reduce_mean(tf.square(ys - prediction))
# Gradient descent with learning rate 0.1.
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)

# Visualize the fit: scatter the noisy data once, then redraw the
# prediction curve as training progresses.
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.scatter(x_data, y_data)
plt.ion()  # interactive mode so plt.show() does not block the loop
plt.show()

for i in range(1000):
    sess.run(train_step, feed_dict={xs: x_data, ys: y_data})
    if i % 50 == 0:
        # Remove the previous prediction line; on the first pass `lines`
        # is not defined yet, so the NameError is deliberately swallowed.
        try:
            ax.lines.remove(lines[0])
        except Exception:
            pass
        prediction_value = sess.run(prediction, feed_dict={xs: x_data})
        lines = ax.plot(x_data, prediction_value, 'r-', lw=5)
        plt.pause(0.1)



原创粉丝点击