Google TensorFlow in Practice: MNIST


mnist_inference.py

import tensorflow as tf

INPUT_NODE = 784     # number of input nodes (28 x 28 pixels)
OUTPUT_NODE = 10     # number of output nodes (10 digit classes)
LAYER1_NODE = 500    # number of hidden-layer nodes


def get_weight_variable(shape, regularizer):
    weights = tf.get_variable(
        "weights", shape,
        initializer=tf.truncated_normal_initializer(stddev=0.1))
    # If a regularizer is given, add the regularization loss for these weights
    # to the 'losses' collection so the training script can sum it into the loss.
    if regularizer is not None:
        tf.add_to_collection('losses', regularizer(weights))
    return weights


def inference(input_tensor, regularizer):
    # First (hidden) layer
    with tf.variable_scope('layer1'):
        weights = get_weight_variable([INPUT_NODE, LAYER1_NODE], regularizer)
        biases = tf.get_variable(
            "biases", [LAYER1_NODE], initializer=tf.constant_initializer(0.0))
        layer1 = tf.nn.relu(tf.matmul(input_tensor, weights) + biases)

    # Second (output) layer
    with tf.variable_scope('layer2'):
        weights = get_weight_variable([LAYER1_NODE, OUTPUT_NODE], regularizer)
        biases = tf.get_variable(
            "biases", [OUTPUT_NODE], initializer=tf.constant_initializer(0.0))
        layer2 = tf.matmul(layer1, weights) + biases

    return layer2
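
Because inference creates its parameters with tf.get_variable inside named variable scopes, the same function can define the forward pass for both training and evaluation. As a quick smoke test, a forward pass on a random batch could look like the sketch below (the random batch and the standalone script are illustrative, not part of the original code):

import numpy as np
import tensorflow as tf

import mnist_inference

# Build a forward pass; no regularizer is needed just to run inference.
x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE], name='x-input')
logits = mnist_inference.inference(x, None)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    fake_batch = np.random.rand(2, mnist_inference.INPUT_NODE).astype(np.float32)
    # With random weights this only checks shapes: the output should be (2, 10).
    print(sess.run(logits, feed_dict={x: fake_batch}).shape)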

mnist_train.py

import os

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

import mnist_inference

BATCH_SIZE = 100                    # number of examples in one batch
LEARNING_RATE_BASE = 0.8            # base learning rate
LEARNING_RATE_DECAY = 0.99          # learning-rate decay rate
REGULARIZATION_RATE = 0.0001        # coefficient of the L2 regularization term
TRAINING_STEPS = 30000              # total number of training steps
MOVING_AVERAGE_DECAY = 0.99         # decay rate for the moving averages
MODEL_SAVE_PATH = "MNIST_model/"    # directory for saved checkpoints
MODEL_NAME = "mnist_model"          # checkpoint file name prefix


def train(mnist):
    x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE], name='x-input')
    y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name='y-input')

    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
    y = mnist_inference.inference(x, regularizer)
    global_step = tf.Variable(0, trainable=False)

    # Maintain an exponential moving average of all trainable variables.
    variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    variables_averages_op = variable_averages.apply(tf.trainable_variables())

    # Cross-entropy loss plus the L2 losses collected in mnist_inference.
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=y, labels=tf.argmax(y_, 1))
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))

    # Decay the learning rate once per epoch of batches.
    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE,
        global_step,
        mnist.train.num_examples / BATCH_SIZE,
        LEARNING_RATE_DECAY,
        staircase=True)
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(
        loss, global_step=global_step)

    # Group the gradient update and the moving-average update into one op.
    with tf.control_dependencies([train_step, variables_averages_op]):
        train_op = tf.no_op(name='train')

    saver = tf.train.Saver()
    with tf.Session() as sess:
        tf.global_variables_initializer().run()

        for i in range(TRAINING_STEPS):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            _, loss_value, step = sess.run([train_op, loss, global_step],
                                           feed_dict={x: xs, y_: ys})
            if i % 1000 == 0:
                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME),
                           global_step=global_step)
                print("After %d training step(s), loss on training batch is %g."
                      % (step, loss_value))


def main(argv=None):
    mnist = input_data.read_data_sets("datasets/MNIST_data", one_hot=True)
    train(mnist)


if __name__ == '__main__':
    tf.app.run()
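
With staircase=True, tf.train.exponential_decay computes LEARNING_RATE_BASE * LEARNING_RATE_DECAY ** floor(global_step / decay_steps), where decay_steps = mnist.train.num_examples / BATCH_SIZE, so the learning rate drops once per epoch. A plain-Python sketch of that schedule (the helper name and the 55000-example training split are assumptions for illustration, not part of the training script):

# Mirrors the documented behaviour of tf.train.exponential_decay with staircase=True.
def decayed_learning_rate(step, base_lr=0.8, decay_rate=0.99, decay_steps=550):
    # 55000 training images / BATCH_SIZE of 100 ~= 550 steps per epoch (assumed split).
    return base_lr * decay_rate ** (step // decay_steps)

for step in (0, 550, 5500, 29999):
    print(step, decayed_learning_rate(step))  # 0.8, 0.792, ~0.72, ~0.46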

mnist_eval.py

import time

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

import mnist_inference
import mnist_train

EVAL_INTERVAL_SECS = 10     # evaluate the latest checkpoint every 10 seconds


def evaluate(mnist):
    with tf.Graph().as_default() as g:
        x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE], name='x-input')
        y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name='y-input')
        validate_feed = {x: mnist.validation.images, y_: mnist.validation.labels}

        # No regularizer is needed at evaluation time.
        y = mnist_inference.inference(x, None)
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        # Restore the shadow (moving-average) values into the model variables.
        variable_averages = tf.train.ExponentialMovingAverage(mnist_train.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        while True:
            with tf.Session() as sess:
                ckpt = tf.train.get_checkpoint_state(mnist_train.MODEL_SAVE_PATH)
                if ckpt and ckpt.model_checkpoint_path:
                    saver.restore(sess, ckpt.model_checkpoint_path)
                    # The global step is encoded in the checkpoint file name.
                    global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                    accuracy_score = sess.run(accuracy, feed_dict=validate_feed)
                    print("After %s training step(s), validation accuracy = %g"
                          % (global_step, accuracy_score))
                else:
                    print('No checkpoint file found')
                    return
            time.sleep(EVAL_INTERVAL_SECS)


def main(argv=None):
    mnist = input_data.read_data_sets("datasets/MNIST_data", one_hot=True)
    evaluate(mnist)


if __name__ == '__main__':
    tf.app.run()
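
mnist_eval.py polls MODEL_SAVE_PATH every EVAL_INTERVAL_SECS seconds, so it is intended to run in a second process alongside mnist_train.py. The same restore logic can also drive a one-shot prediction; the sketch below is illustrative only (the script name mnist_predict.py and the five-image test slice are assumptions, not part of the original post):

# Hypothetical mnist_predict.py: restore the latest checkpoint once and predict.
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

import mnist_inference
import mnist_train

mnist = input_data.read_data_sets("datasets/MNIST_data", one_hot=True)

x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE], name='x-input')
y = mnist_inference.inference(x, None)
prediction = tf.argmax(y, 1)

# Restore the moving-average (shadow) weights, exactly as mnist_eval.py does.
variable_averages = tf.train.ExponentialMovingAverage(mnist_train.MOVING_AVERAGE_DECAY)
saver = tf.train.Saver(variable_averages.variables_to_restore())

with tf.Session() as sess:
    ckpt = tf.train.get_checkpoint_state(mnist_train.MODEL_SAVE_PATH)
    if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)
        print(sess.run(prediction, feed_dict={x: mnist.test.images[:5]}))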