TensorFlow Examples <4>: Implementing an RNN

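This example classifies MNIST digits with an LSTM: each 28*28 image is read as a sequence of 28 rows of 28 pixels, and the hidden state after the last row feeds a linear classification layer. The full script (TensorFlow 1.x API):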
# -*- coding: utf-8 -*-
# @Author: xiaodong
# @Date:   2017-06-19 21:26:12
# @Last Modified by:   xiaodong
# @Last Modified time: 2017-07-02 10:15:48

import tensorflow as tf
from tensorflow.contrib import rnn

# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

'''
To classify images using a recurrent neural network, we consider every image
row as a sequence of pixels. Because MNIST image shape is 28*28px, we will then
handle 28 sequences of 28 steps for every sample.
'''

# Parameters
learning_rate = 0.001
training_iters = 100000
batch_size = 128
display_step = 10

# Network Parameters
n_input = 28    # MNIST data input (img shape: 28*28)
n_steps = 28    # timesteps
n_hidden = 128  # hidden layer num of features
n_classes = 10  # MNIST total classes (0-9 digits)

# tf Graph input
x = tf.placeholder("float", [None, n_steps, n_input])
y = tf.placeholder("float", [None, n_classes])

# Define weights
weights = {
    'out': tf.Variable(tf.random_normal([n_hidden, n_classes]))
}
biases = {
    'out': tf.Variable(tf.random_normal([n_classes]))
}


def RNN(x, weights, biases):

    # Prepare data shape to match `rnn` function requirements
    # Current data input shape: (batch_size, n_steps, n_input)
    # Required shape: 'n_steps' tensors list of shape (batch_size, n_input)

    # Unstack to get a list of 'n_steps' tensors of shape (batch_size, n_input)
    x = tf.unstack(x, n_steps, 1)

    # Define an LSTM cell with TensorFlow
    lstm_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)

    # Get LSTM cell output
    outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)

    # Linear activation, using rnn inner loop last output
    return tf.matmul(outputs[-1], weights['out']) + biases['out']


pred = RNN(x, weights, biases)

# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

# Evaluate model
correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

# Initializing the variables
init = tf.global_variables_initializer()

# Launch the graph
with tf.Session() as sess:
    sess.run(init)
    step = 1
    # Keep training until reach max iterations
    while step * batch_size < training_iters:
        batch_x, batch_y = mnist.train.next_batch(batch_size)
        # Reshape data to get 28 seq of 28 elements
        batch_x = batch_x.reshape((batch_size, n_steps, n_input))
        # Run optimization op (backprop)
        sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})
        if step % display_step == 0:
            # Calculate batch accuracy
            acc = sess.run(accuracy, feed_dict={x: batch_x, y: batch_y})
            # Calculate batch loss
            loss = sess.run(cost, feed_dict={x: batch_x, y: batch_y})
            print("Iter " + str(step*batch_size) + ", Minibatch Loss= " + \
                  "{:.6f}".format(loss) + ", Training Accuracy= " + \
                  "{:.5f}".format(acc))
        step += 1
    print("Optimization Finished!")

    # Calculate accuracy for 128 mnist test images
    test_len = 128
    test_data = mnist.test.images[:test_len].reshape((-1, n_steps, n_input))
    test_label = mnist.test.labels[:test_len]
    print("Testing Accuracy:", \
        sess.run(accuracy, feed_dict={x: test_data, y: test_label}))
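The unstack + static_rnn pair above builds one graph node per time step. The same model can also be written with tf.nn.dynamic_rnn, which consumes the (batch_size, n_steps, n_input) tensor directly. Below is a minimal sketch of such a drop-in replacement for the RNN function; it is not part of the original post, just a common variant under the same TF 1.x API:

def RNN_dynamic(x, weights, biases):
    # Note: no tf.unstack needed; dynamic_rnn takes the 3-D tensor as-is
    lstm_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
    # outputs has shape (batch_size, n_steps, n_hidden); states is the final state
    outputs, states = tf.nn.dynamic_rnn(lstm_cell, x, dtype=tf.float32)
    # Classify from the output at the last time step, as in the original
    return tf.matmul(outputs[:, -1, :], weights['out']) + biases['out']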
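Note that both tensorflow.contrib and the tensorflow.examples.tutorials.mnist loader belong to the TF 1.x line and are gone in TF 2.x. For readers on a current install, here is a minimal tf.keras sketch of the same row-as-timestep LSTM classifier, using the same hyperparameters; it approximates the script above rather than reproducing it:

import tensorflow as tf

# Load MNIST via the Keras dataset API; each image is (28, 28),
# i.e. 28 time steps of 28 pixels, matching n_steps and n_input above
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0

model = tf.keras.Sequential([
    tf.keras.layers.LSTM(128, input_shape=(28, 28)),  # n_hidden = 128
    tf.keras.layers.Dense(10),                        # logits for 10 classes
])
model.compile(
    optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=['accuracy'],
)
model.fit(x_train, y_train, batch_size=128, epochs=1)
model.evaluate(x_test, y_test)

Here from_logits=True plays the role of tf.nn.softmax_cross_entropy_with_logits in the original graph: the Dense layer emits raw logits and the loss applies the softmax internally.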