TensorFlow Study Notes (1): The Simplest Getting-Started Programs


1、Polynomial Regression

1. Prepare the data, placeholders, and Variables

import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt

# Generate 300 noisy samples of sin(x) on [-3, 3]
n_examples = 300
xs = np.linspace(-3, 3, n_examples)
ys = np.sin(xs) + np.random.uniform(-0.5, 0.5, n_examples)

# Placeholders for the inputs and targets (no fixed shape)
X = tf.placeholder(tf.float32, name='X')
Y = tf.placeholder(tf.float32, name='Y')

# Trainable coefficients of the cubic polynomial
W = tf.Variable(tf.random_normal([1]), name='W')
W_2 = tf.Variable(tf.random_normal([1]), name='W_2')  # fixed: name belongs to the Variable, not to random_normal
W_3 = tf.Variable(tf.random_normal([1]), name='W_3')
b = tf.Variable(tf.random_normal([1]), name='b')
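A quick aside on placeholders (my addition, not from the original post): a placeholder holds no data until sess.run supplies it through feed_dict, which is why steps 2–3 below separate graph construction from execution. A minimal standalone sketch:

import tensorflow as tf

a = tf.placeholder(tf.float32, name='a')  # an empty slot in the graph
double = a * 2
with tf.Session() as sess:
    # Without feed_dict={a: ...} this run would raise an error.
    print(sess.run(double, feed_dict={a: 3.0}))  # prints 6.0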

2. Define the prediction, the loss, and the optimizer

# Prediction: y = W*x + W_2*x^2 + W_3*x^3 + b
y_ = tf.add(tf.multiply(X, W), b)
y_ = tf.add(y_, tf.multiply(tf.pow(X, 2), W_2))
y_ = tf.add(y_, tf.multiply(tf.pow(X, 3), W_3))

# Mean squared error over the dataset
n_samples = xs.shape[0]
loss = tf.reduce_sum(tf.square(Y - y_)) / n_samples

learning_rate = 0.03
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
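As a side note, this loss is ordinary mean squared error; tf.reduce_mean expresses it without tracking n_samples by hand (reusing Y and y_ from above):

# Equivalent MSE when all samples are fed together:
loss = tf.reduce_mean(tf.square(Y - y_))

One caveat: this matches the original only for full-batch feeding (see the sketch after step 3). In the per-sample loop below, the original divides each single squared error by n_samples, which effectively scales the learning rate down by a factor of 300.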

3. Run the Graph in a Session

with tf.Session() as sess:
    writer = tf.summary.FileWriter('./graphs/polynomial_reg', sess.graph)
    sess.run(tf.global_variables_initializer())
    for i in range(201):
        total_loss = 0
        # Stochastic gradient descent: one (x, y) sample per step
        for x, y in zip(xs, ys):
            _, l = sess.run([optimizer, loss], feed_dict={X: x, Y: y})
            total_loss += l
        if i % 20 == 0:
            print('Epoch {0}: {1}'.format(i, total_loss / n_samples))
    writer.close()
    # Pull the trained coefficients out as NumPy arrays for plotting
    W, W_2, W_3, b = sess.run([W, W_2, W_3, b])
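The inner loop above feeds one sample per step, which is slow in Python. Because X and Y were declared without a fixed shape, the whole arrays can be fed at once; a sketch of a full-batch variant of the training loop (my rewrite, same graph as above):

for i in range(201):
    # One run() over all 300 points; the elementwise ops broadcast
    # across the arrays, replacing the per-sample Python loop.
    _, l = sess.run([optimizer, loss], feed_dict={X: xs, Y: ys})
    if i % 20 == 0:
        print('Epoch {0}: {1}'.format(i, l))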

4. Plot the results

plt.plot(xs, ys, 'bo', label='Real Data')
plt.plot(xs, xs * W + np.power(xs, 2) * W_2 + np.power(xs, 3) * W_3 + b,
         'r-', lw=5, label='Predicted Data')
plt.legend()
plt.xlabel('x')
plt.ylabel('y')
plt.title('Polynomial Regression')
plt.show()

[Figure: the noisy sin(x) samples (blue dots) with the fitted cubic curve (red line)]
5. View the Graph in TensorBoard
Open cmd and run in the terminal:

$ python [yourprogram].py
$ tensorboard --logdir=./graphs/polynomial_reg

Open http://DESKTOP-8U9HK9B:6006 (the address TensorBoard prints; on most machines http://localhost:6006 works too) and the dataflow graph appears under the GRAPHS tab.
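The FileWriter above records only the graph structure. To also chart the loss over time under TensorBoard's SCALARS tab, the standard pattern looks roughly like this (a sketch; the names are mine):

# Attach a scalar summary op to the loss tensor.
loss_summary = tf.summary.scalar('loss', loss)
# ... then, inside the training loop:
#     _, s = sess.run([optimizer, loss_summary], feed_dict={X: x, Y: y})
#     writer.add_summary(s, global_step=step)  # step: any increasing counter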

2、MNIST Handwritten Digit Recognition with Logistic Regression

1. Prepare the data, placeholders, and Variables

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import time

mnist = input_data.read_data_sets('/data/mnist', one_hot=True)

batch_size = 128
# Each MNIST image is 28*28 = 784 pixels; labels are one-hot over 10 classes.
X = tf.placeholder(tf.float32, [batch_size, 784], name='X_placeholder')
# One-hot labels as float32: softmax_cross_entropy_with_logits expects labels
# with the same dtype as the logits (the original tf.int16 does not match).
Y = tf.placeholder(tf.float32, [batch_size, 10], name='Y_placeholder')

W = tf.Variable(tf.random_normal(shape=[784, 10], stddev=0.01), name='weight')
b = tf.Variable(tf.zeros([1, 10]), name='bias')
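One limitation worth noting: because the placeholders fix the batch dimension to batch_size, this graph only ever accepts exactly 128 examples at a time, which is why evaluation in step 3 also has to run batch by batch. Declaring the batch dimension as None, as the DNN section (3、) does, removes that restriction; a drop-in alternative:

# Accept batches of any size, including the full test set at once.
X = tf.placeholder(tf.float32, [None, 784], name='X_placeholder')
Y = tf.placeholder(tf.float32, [None, 10], name='Y_placeholder')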

2. Define the loss and optimizer

logits = tf.add(tf.matmul(X, W), b)
entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=Y, name='entropy')
loss = tf.reduce_mean(entropy)  # average cross-entropy over the batch

learning_rate = 0.03
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(loss)
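If the labels were kept as integer class indices instead of one-hot vectors (i.e. read_data_sets with one_hot=False), the sparse variant of the same loss avoids building one-hot rows entirely. A sketch reusing logits from above (this labels placeholder is my own, not in the original):

labels = tf.placeholder(tf.int64, [batch_size], name='labels')  # class ids 0-9
entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
    logits=logits, labels=labels, name='sparse_entropy')
loss = tf.reduce_mean(entropy)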

3. Run the Graph in a Session

n_epochs = 30
with tf.Session() as sess:
    writer = tf.summary.FileWriter('./graphs/logistic_reg2', sess.graph)
    start_time = time.time()
    sess.run(tf.global_variables_initializer())
    n_batchs = int(mnist.train.num_examples / batch_size)

    # Training
    for i in range(n_epochs):
        total_loss = 0
        for _ in range(n_batchs):
            X_batch, Y_batch = mnist.train.next_batch(batch_size)
            _, loss_batch = sess.run([optimizer, loss], feed_dict={X: X_batch, Y: Y_batch})
            total_loss += loss_batch
        print('Average loss in a batch (Epoch {0}): {1}'.format(i, total_loss / n_batchs))
    print('Total time: {0} seconds'.format(time.time() - start_time))
    print('Optimization Done!')

    # Evaluation: the placeholders are fixed to batch_size examples, so the
    # test set is scored batch by batch.
    preds = tf.nn.softmax(logits)
    isCorrect_preds = tf.equal(tf.argmax(preds, 1), tf.argmax(Y, 1))
    # Note: despite the name, this is the *count* of correct predictions in a
    # batch (reduce_sum), not a rate; the division happens at the end.
    accuracy = tf.reduce_sum(tf.cast(isCorrect_preds, tf.float32))

    n_batchs = int(mnist.test.num_examples / batch_size)
    total_correct_preds = 0
    for i in range(n_batchs):
        X_batch, Y_batch = mnist.test.next_batch(batch_size)
        accuracy_batch = sess.run([accuracy], feed_dict={X: X_batch, Y: Y_batch})
        total_correct_preds += accuracy_batch[0]
    print('Accuracy: {0}'.format(total_correct_preds / mnist.test.num_examples))
    writer.close()

The accuracy comes out around 0.9.

3、MNIST Handwritten Digit Recognition with a DNN

1. Prepare the data, placeholders, and Variables

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets('/data/mnist', one_hot=True)

# None as the batch dimension lets us feed batches of any size.
X = tf.placeholder(tf.float32, [None, 784], name='X_placeholder')
# One-hot labels as float32 (the original tf.int8 does not match the dtype
# that softmax_cross_entropy_with_logits expects).
Y = tf.placeholder(tf.float32, [None, 10], name='Y_placeholder')

n_input = 784
n_hidden_1 = 256
n_hidden_2 = 256
n_hidden_3 = 256
n_output = 10

Weights = {'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1]), name='W1'),
           'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2]), name='W2'),
           'h3': tf.Variable(tf.random_normal([n_hidden_2, n_hidden_3]), name='W3'),
           'out': tf.Variable(tf.random_normal([n_hidden_3, n_output]), name='W4')}
biases = {'b1': tf.Variable(tf.random_normal([n_hidden_1]), name='b1'),
          'b2': tf.Variable(tf.random_normal([n_hidden_2]), name='b2'),
          'b3': tf.Variable(tf.random_normal([n_hidden_3]), name='b3'),
          'out': tf.Variable(tf.random_normal([n_output]), name='b4')}
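One caveat I'd flag: tf.random_normal defaults to stddev=1.0, a fairly large initial scale for 256-wide layers (the logistic section used stddev=0.01). If training stalls, a smaller scale is worth trying; a variant of the same dict (the 0.1 value is my guess, not the author's):

Weights = {'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1], stddev=0.1), name='W1'),
           'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2], stddev=0.1), name='W2'),
           'h3': tf.Variable(tf.random_normal([n_hidden_2, n_hidden_3], stddev=0.1), name='W3'),
           'out': tf.Variable(tf.random_normal([n_hidden_3, n_output], stddev=0.1), name='W4')}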

2. Define the loss and optimizer

def multilayer_perceptron(x, weights, biases):
    # Three ReLU hidden layers; the output layer stays linear (raw logits).
    layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
    layer_1 = tf.nn.relu(layer_1)
    layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
    layer_2 = tf.nn.relu(layer_2)
    layer_3 = tf.add(tf.matmul(layer_2, weights['h3']), biases['b3'])
    layer_3 = tf.nn.relu(layer_3)
    output_layer = tf.add(tf.matmul(layer_3, weights['out']), biases['out'])
    return output_layer

logits = multilayer_perceptron(X, Weights, biases)
entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=Y, name='entropy')
loss = tf.reduce_mean(entropy)
learning_rate = 0.003
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(loss)
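TF 1.x also ships tf.layers.dense, which creates and tracks the weight and bias variables internally, so the Weights/biases dicts disappear. A sketch of the same three-hidden-layer network under that API (my restatement, not the original code):

def multilayer_perceptron_v2(x):
    # Each dense call owns its own W and b; ReLU on the hidden layers,
    # linear output so the return value is raw logits.
    h1 = tf.layers.dense(x, 256, activation=tf.nn.relu, name='h1')
    h2 = tf.layers.dense(h1, 256, activation=tf.nn.relu, name='h2')
    h3 = tf.layers.dense(h2, 256, activation=tf.nn.relu, name='h3')
    return tf.layers.dense(h3, 10, name='out')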

3. Run the Graph in a Session

batch_size = 128
n_epochs = 51
n_batchs = int(mnist.train.num_examples / batch_size)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    writer = tf.summary.FileWriter('./graphs/MLP_DNN', sess.graph)
    for i in range(n_epochs):
        avg_loss = 0
        for _ in range(n_batchs):
            x_batch, y_batch = mnist.train.next_batch(batch_size)
            _, l = sess.run([optimizer, loss], feed_dict={X: x_batch, Y: y_batch})
            avg_loss += l / n_batchs
        if i % 5 == 0:
            print('Epoch {0}: avg_loss = {1}'.format(i, avg_loss))
    print('Optimization Done!')

    # Because the placeholders use None for the batch dimension, the whole
    # test set can be evaluated in a single run.
    isCorrect = tf.equal(tf.argmax(logits, 1), tf.argmax(Y, 1))
    accuracy = tf.reduce_mean(tf.cast(isCorrect, tf.float32))
    print('Accuracy: {0}'.format(accuracy.eval({X: mnist.test.images, Y: mnist.test.labels})))
    writer.close()
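For completeness, classifying a single image with the trained network is just another run of logits. This sketch has to execute inside the with-block above, while the session is still open:

# Predict the class of the first test image (assumes sess is still open).
img = mnist.test.images[:1]
pred_class = sess.run(tf.argmax(logits, 1), feed_dict={X: img})
print('Predicted digit: {0}'.format(pred_class[0]))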

In my tests, a DNN with two hidden layers reaches about 96% accuracy, and one with three hidden layers reaches about 97%.

Attached, an illustration of how reduction_indices works:
[Figure: reduce_sum collapsing a 2-D tensor along reduction_indices 0 vs. 1]
via the question "Tensorflow 的reduce_sum()函数到底是什么意思，谁能解释下?"
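Since the figure itself did not survive, here is a standalone sketch reproducing the standard example it illustrated: reduction_indices (called axis in later TF versions) picks which dimension reduce_sum collapses.

import tensorflow as tf

x = tf.constant([[1, 1, 1],
                 [1, 1, 1]])
with tf.Session() as sess:
    print(sess.run(tf.reduce_sum(x)))     # 6        -> sum of every element
    print(sess.run(tf.reduce_sum(x, 0)))  # [2 2 2]  -> collapse rows (sum down each column)
    print(sess.run(tf.reduce_sum(x, 1)))  # [3 3]    -> collapse columns (sum across each row)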