Artificial Intelligence from Beginner to Mastery (15): Applying Convolutional Networks to Digit Recognition


Classic convolutional network models

The LeNet-5 model

LeNet-5 stacks seven layers in the following order (the implementation below follows the same layout, but with ReLU activations, max pooling, and dropout; the feature-map sizes this produces are traced in the sketch after the list):

  1. Convolutional layer
  2. Pooling layer
  3. Convolutional layer
  4. Pooling layer
  5. Fully connected layer
  6. Fully connected layer
  7. Fully connected layer (the output layer, which is only approximately fully connected in the original LeNet-5)
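For the 28 x 28 MNIST input used below, the layer parameters in mnist_inference.py produce the following feature-map sizes. This is a minimal stand-alone sanity check, not part of the original code:

# SAME padding keeps height/width unchanged under a stride-1 convolution;
# each 2x2, stride-2 max pool halves height and width.
size, channels = 28, 1
size, channels = size, 32        # conv1: 5x5, 32 filters -> 28 x 28 x 32
size, channels = size // 2, 32   # pool1: 2x2, stride 2   -> 14 x 14 x 32
size, channels = size, 64        # conv2: 5x5, 64 filters -> 14 x 14 x 64
size, channels = size // 2, 64   # pool2: 2x2, stride 2   ->  7 x  7 x 64
print(size * size * channels)    # 3136 inputs to the first fully connected layer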

    Code

    train (mnist_train.py)

import os

import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

import mnist_inference

# training hyperparameters
BATCH_SIZE = 100
LEARNING_RATE_BASE = 0.8
LEARNING_RATE_DECAY = 0.99
REGULARIZATION_RATE = 0.0001
TRAINING_STEPS = 20000
MOVING_AVERAGE_DECAY = 0.99

# model save path and file name
MODEL_SAVE_PATH = "/path/to/model"
MODEL_NAME = "model.ckpt"


def train(mnist):
    # the convolutional network takes 4-D input: [batch, height, width, channels]
    x = tf.placeholder(tf.float32,
                       [BATCH_SIZE, mnist_inference.IMAGE_SIZE,
                        mnist_inference.IMAGE_SIZE, mnist_inference.NUM_CHANNELS],
                       name='x-input')
    y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name='y-input')

    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
    # inference() returns raw logits (see mnist_inference.py)
    y = mnist_inference.inference(x, True, regularizer)

    # global step drives both the learning-rate and moving-average decay
    global_step = tf.Variable(0, trainable=False)
    variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    variable_averages_op = variable_averages.apply(tf.trainable_variables())

    # cross entropy on the logits, plus the L2 regularization terms
    # collected in mnist_inference.py
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=y, labels=tf.argmax(y_, 1))
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))

    # decay the learning rate once per epoch
    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE, global_step,
        mnist.train.num_examples / BATCH_SIZE, LEARNING_RATE_DECAY)
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(
        loss, global_step=global_step)

    # update the weights and their moving averages in a single training op
    with tf.control_dependencies([train_step, variable_averages_op]):
        train_op = tf.no_op(name='train')

    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for i in range(TRAINING_STEPS):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            # reshape the flat 784-pixel rows into 28x28x1 images
            reshaped_xs = np.reshape(xs, (BATCH_SIZE, mnist_inference.IMAGE_SIZE,
                                          mnist_inference.IMAGE_SIZE,
                                          mnist_inference.NUM_CHANNELS))
            _, loss_value, step = sess.run([train_op, loss, global_step],
                                           feed_dict={x: reshaped_xs, y_: ys})
            if i % 1000 == 0:
                print("step %d, loss is %g" % (step, loss_value))
                # checkpoint every 1000 steps rather than every step
                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME),
                           global_step=global_step)


def main(argv=None):
    mnist = input_data.read_data_sets("/path/to/MNIST_data/", one_hot=True)
    train(mnist)


if __name__ == '__main__':
    tf.app.run()
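The exponential-decay schedule above starts at 0.8 and decays by a factor of 0.99 once per epoch (num_examples / BATCH_SIZE steps). A stand-alone sketch of the values it produces, assuming the standard 55,000-example MNIST training split and the default staircase=False (so the exponent is fractional):

# decayed rate = LEARNING_RATE_BASE * LEARNING_RATE_DECAY ** (step / decay_steps)
LEARNING_RATE_BASE = 0.8
LEARNING_RATE_DECAY = 0.99
decay_steps = 55000 / 100   # mnist.train.num_examples / BATCH_SIZE

for step in (0, 550, 5500, 20000):
    lr = LEARNING_RATE_BASE * LEARNING_RATE_DECAY ** (step / decay_steps)
    print("step %5d -> learning rate %.4f" % (step, lr))
# step     0 -> learning rate 0.8000
# step   550 -> learning rate 0.7920
# step  5500 -> learning rate 0.7235
# step 20000 -> learning rate 0.5551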

inference (mnist_inference.py)

import tensorflow as tf

# dataset parameters
INPUT_NODE = 784
OUTPUT_NODE = 10

# network parameters
IMAGE_SIZE = 28
NUM_CHANNELS = 1
NUM_LABELS = 10
DROP_PROB = 0.5
CONV1_DEEP = 32
CONV1_SIZE = 5
CONV2_DEEP = 64
CONV2_SIZE = 5
FC1_SIZE = 1024


def inference(input_tensor, train, regularizer):
    # layer 1: 5x5 convolution, stride 1, zero padding, 1 -> 32 feature maps
    with tf.variable_scope('layer1-conv1'):
        conv1_W = tf.get_variable(
            "weight", [CONV1_SIZE, CONV1_SIZE, NUM_CHANNELS, CONV1_DEEP],
            initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv1_b = tf.get_variable("bias", [CONV1_DEEP],
                                  initializer=tf.constant_initializer(0.1))
        conv1 = tf.nn.conv2d(input_tensor, conv1_W, strides=[1, 1, 1, 1], padding='SAME')
        relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_b))

    # layer 2: 2x2 max pooling, stride 2 (28x28 -> 14x14)
    with tf.variable_scope('layer1-max_pool'):
        pool1 = tf.nn.max_pool(relu1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                               padding='SAME')

    # layer 3: 5x5 convolution, stride 1, zero padding, 32 -> 64 feature maps
    with tf.variable_scope('layer2-conv2'):
        conv2_W = tf.get_variable(
            "weight", [CONV2_SIZE, CONV2_SIZE, CONV1_DEEP, CONV2_DEEP],
            initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv2_b = tf.get_variable("bias", [CONV2_DEEP],
                                  initializer=tf.constant_initializer(0.1))
        conv2 = tf.nn.conv2d(pool1, conv2_W, strides=[1, 1, 1, 1], padding='SAME')
        relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_b))

    # layer 4: 2x2 max pooling, stride 2 (14x14 -> 7x7)
    with tf.variable_scope('layer2-max_pool'):
        pool2 = tf.nn.max_pool(relu2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                               padding='SAME')

    # flatten the 7x7x64 feature maps into a vector for the fully connected layers
    pool_shape = pool2.get_shape().as_list()
    nodes = pool_shape[1] * pool_shape[2] * pool_shape[3]
    reshaped = tf.reshape(pool2, [-1, nodes])

    # layer 5: fully connected, 3136 -> 1024, with dropout during training
    with tf.variable_scope('layer3-fc1'):
        fc1_W = tf.get_variable("weight", [nodes, FC1_SIZE],
                                initializer=tf.truncated_normal_initializer(stddev=0.1))
        # only the fully connected weights are L2-regularized
        if regularizer is not None:
            tf.add_to_collection('losses', regularizer(fc1_W))
        fc1_b = tf.get_variable("bias", [FC1_SIZE],
                                initializer=tf.constant_initializer(0.1))
        fc1 = tf.nn.relu(tf.matmul(reshaped, fc1_W) + fc1_b)
        if train:
            fc1 = tf.nn.dropout(fc1, DROP_PROB)

    # layer 6: fully connected, 1024 -> 10
    with tf.variable_scope('layer3-softmax'):
        fc2_W = tf.get_variable("weight", [FC1_SIZE, NUM_LABELS],
                                initializer=tf.truncated_normal_initializer(stddev=0.1))
        if regularizer is not None:
            tf.add_to_collection('losses', regularizer(fc2_W))
        fc2_b = tf.get_variable("bias", [NUM_LABELS],
                                initializer=tf.constant_initializer(0.1))
        # return raw logits: the training loss applies softmax internally,
        # and argmax for evaluation is unchanged by softmax
        logits = tf.matmul(fc1, fc2_W) + fc2_b
    return logits
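A note on the return value: inference() hands back the raw logits rather than softmax probabilities. The training script's tf.nn.sparse_softmax_cross_entropy_with_logits applies softmax internally (feeding it already-softmaxed values would silently distort the loss), and the evaluation script only needs argmax, which softmax never changes. A tiny stand-alone illustration with made-up numbers:

import numpy as np

logits = np.array([2.0, 1.0, 0.1])
softmax = np.exp(logits) / np.sum(np.exp(logits))
print(np.argmax(logits) == np.argmax(softmax))  # True: same predicted class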

eval

import time

import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

import mnist_inference
import mnist_train

# re-evaluate the newest checkpoint every 10 seconds
EVAL_INTERVAL_SECS = 10


def evaluate(mnist):
    with tf.Graph().as_default():
        x = tf.placeholder(tf.float32,
                           [None, mnist_inference.IMAGE_SIZE,
                            mnist_inference.IMAGE_SIZE, mnist_inference.NUM_CHANNELS],
                           name='x-input')
        y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE],
                            name='y-input')

        # reshape the whole test set into 4-D image tensors
        xs = mnist.test.images
        reshaped_xs = np.reshape(xs, (-1, mnist_inference.IMAGE_SIZE,
                                      mnist_inference.IMAGE_SIZE,
                                      mnist_inference.NUM_CHANNELS))
        test_feed = {x: reshaped_xs, y_: mnist.test.labels}

        # no dropout and no regularization at evaluation time
        y = mnist_inference.inference(x, False, None)

        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        # restore the shadow (moving-average) values into the variables
        variable_averages = tf.train.ExponentialMovingAverage(
            mnist_train.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        while True:
            with tf.Session() as sess:
                ckpt = tf.train.get_checkpoint_state(mnist_train.MODEL_SAVE_PATH)
                if ckpt and ckpt.model_checkpoint_path:
                    saver.restore(sess, ckpt.model_checkpoint_path)
                    # the step number is the suffix of the checkpoint file name
                    global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                    accuracy_score = sess.run(accuracy, feed_dict=test_feed)
                    print("step %s, accuracy is %g" % (global_step, accuracy_score))
                else:
                    print("No checkpoint file found")
                    return
            time.sleep(EVAL_INTERVAL_SECS)


def main(argv=None):
    mnist = input_data.read_data_sets("/path/to/MNIST_data/", one_hot=True)
    evaluate(mnist)


if __name__ == '__main__':
    tf.app.run()
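The evaluation script is meant to run alongside training: every 10 seconds it reloads the newest checkpoint and rescores the test set. The restore trick is variables_to_restore(), which maps each variable's shadow (moving-average) name in the checkpoint onto the plain variable in this graph, so evaluation uses the smoothed weights. A minimal sketch of that mapping, with an illustrative variable name (not from the original post):

import tensorflow as tf

v = tf.Variable(0.0, name="layer3-fc1/weight")
ema = tf.train.ExponentialMovingAverage(0.99)
print(ema.variables_to_restore())
# {'layer3-fc1/weight/ExponentialMovingAverage': <tf.Variable 'layer3-fc1/weight:0' ...>}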