TensorFlow Study Notes: Implementing the Classic LeNet-5 Model
The LeNet-5 model was proposed by Professor Yann LeCun in 1998 and was the first convolutional neural network successfully applied to digit recognition. On the MNIST data it reaches an accuracy of about 99.2%.
This TensorFlow implementation of LeNet-5 relies mainly on variable management, which improves code readability, reduces code redundancy, and makes variables easier to manage and the programming more efficient (a short sketch of the mechanism follows the list below). We split the LeNet-5 model into three parts:
1. Network definition: the network structure needed by both training and evaluation.
2. Training: trains the network on the MNIST training set.
3. Evaluation: checks the accuracy of the trained model; while training runs, TensorFlow can evaluate the model's accuracy continuously.
The benefit of separating training from evaluation is that the training part can keep writing out trained models while the evaluation part checks their accuracy at regular intervals; if the model performs poorly, the network's parameters can be adjusted promptly.
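The variable management in question is TensorFlow's tf.variable_scope / tf.get_variable mechanism: the same inference code can create its variables on the first call and look the existing ones up on later calls, which is exactly what lets the training and evaluation scripts below share one network definition. Here is a minimal sketch of the mechanism (the scope and layer names are illustrative, not part of the model below):

import tensorflow as tf

def fc_layer(x, out_dim):
    # get_variable creates 'w' on the first call; with reuse=True it
    # returns the variable that already exists under the current scope.
    w = tf.get_variable('w', shape=[x.get_shape().as_list()[1], out_dim],
                        initializer=tf.truncated_normal_initializer(stddev=0.1))
    return tf.matmul(x, w)

x = tf.placeholder(tf.float32, shape=[None, 784])
with tf.variable_scope('fc'):
    y_train = fc_layer(x, 10)   # creates the variable fc/w
with tf.variable_scope('fc', reuse=True):
    y_eval = fc_layer(x, 10)    # reuses fc/w instead of creating a duplicate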
I. Network definition
import tensorflow as tf

INPUT_NODE = 784
OUTPUT_NODE = 10
IMAGE_SIZE = 28
NUM_CHANNEL = 1
NUM_LABEL = 10

# Layer 1 (convolution)
CONV1_DEEP = 32
CONV1_SIZE = 5
# Layer 2 (convolution)
CONV2_DEEP = 64
CONV2_SIZE = 5
# Fully connected layer
FC_SIZE = 512

def interence(input_tensor, train, regularizer):
    with tf.variable_scope('layer1-conv'):
        # Filter shape: [filter_height, filter_width, in_channels, out_channels]
        # Input shape:  [batch, in_height, in_width, in_channels]
        # strides = [1, stride, stride, 1]; output: [batch, height, width, channels]
        w = tf.get_variable('w', [CONV1_SIZE, CONV1_SIZE, NUM_CHANNEL, CONV1_DEEP],
                            initializer=tf.truncated_normal_initializer(stddev=0.1))
        b = tf.get_variable('b', shape=[CONV1_DEEP], initializer=tf.constant_initializer(0.0))
        conv1 = tf.nn.conv2d(input_tensor, w, strides=[1, 1, 1, 1], padding='SAME')
        relu1 = tf.nn.relu(tf.nn.bias_add(conv1, b))

    with tf.variable_scope('layer2-pool'):
        pool1 = tf.nn.max_pool(relu1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

    with tf.variable_scope('layer3-conv'):
        w = tf.get_variable('w', [CONV2_SIZE, CONV2_SIZE, CONV1_DEEP, CONV2_DEEP],
                            initializer=tf.truncated_normal_initializer(stddev=0.1))
        b = tf.get_variable('b', shape=[CONV2_DEEP], initializer=tf.constant_initializer(0.0))
        conv2 = tf.nn.conv2d(pool1, w, strides=[1, 1, 1, 1], padding='SAME')
        relu2 = tf.nn.relu(tf.nn.bias_add(conv2, b))

    with tf.variable_scope('layer4-pool'):
        # pool2 has shape [batch_size, 7, 7, 64]
        pool2 = tf.nn.max_pool(relu2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

    # The fully connected layers expect a flat vector, so flatten pool2 first.
    pool_shape = pool2.get_shape().as_list()
    nodes = pool_shape[1] * pool_shape[2] * pool_shape[3]
    reshaped = tf.reshape(pool2, [-1, nodes])

    with tf.variable_scope('layer5-fc1'):
        fc1_w = tf.get_variable('w', shape=[nodes, FC_SIZE],
                                initializer=tf.truncated_normal_initializer(stddev=0.1))
        # Only the fully connected weights are regularized.
        if regularizer is not None:
            tf.add_to_collection('loss', regularizer(fc1_w))
        fc1_b = tf.get_variable('b', shape=[FC_SIZE], initializer=tf.constant_initializer(0.1))
        fc1 = tf.nn.relu(tf.matmul(reshaped, fc1_w) + fc1_b)
        # Dropout randomly zeroes some activations to reduce overfitting, so the
        # model generalizes better to test data. It is usually applied only to
        # fully connected layers, and only during training.
        if train:
            fc1 = tf.nn.dropout(fc1, 0.5)

    with tf.variable_scope('layer6-fc2'):
        fc2_w = tf.get_variable('w', shape=[FC_SIZE, NUM_LABEL],
                                initializer=tf.truncated_normal_initializer(stddev=0.1))
        if regularizer is not None:
            tf.add_to_collection('loss', regularizer(fc2_w))
        fc2_b = tf.get_variable('b', shape=[NUM_LABEL], initializer=tf.constant_initializer(0.1))
        # The output layer returns raw logits; no activation is applied here
        # because softmax is folded into the cross-entropy loss during training.
        logit = tf.matmul(fc1, fc2_w) + fc2_b

    return logit
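Two 2x2 max-pool layers with stride 2 shrink the 28x28 input to 7x7, and the second convolution has 64 output channels, so the flattened vector has nodes = 7 * 7 * 64 = 3136 elements and the logits have shape [batch, 10]. As a quick sanity check (assuming the code above is saved as mnist_interence.py inside a mnist_cnn package, which is how the training script below imports it):

import tensorflow as tf
from mnist_cnn import mnist_interence

x = tf.placeholder(tf.float32, shape=[None, 28, 28, 1])
logits = mnist_interence.interence(x, False, None)
print(logits.get_shape())  # expected: (?, 10) -- one logit per digit class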
II. Training
import os

import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

from mnist_cnn import mnist_interence

BATCH_SIZE = 100
LEARNING_RATE_BASE = 0.01  # base learning rate for exponential decay
LEARNING_RATE_DECAY = 0.99
REGULARIZATION_RATE = 0.0001
MOVING_AVERAGE_DECAY = 0.99
TRAIN_STEP = 300000
MODEL_PATH = 'model'
MODEL_NAME = 'model'

def train(mnist):
    x = tf.placeholder(tf.float32,
                       shape=[None, mnist_interence.IMAGE_SIZE, mnist_interence.IMAGE_SIZE,
                              mnist_interence.NUM_CHANNEL],
                       name='x-input')
    y_ = tf.placeholder(tf.float32, shape=[None, mnist_interence.OUTPUT_NODE], name='y-input')
    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
    y = mnist_interence.interence(x, True, regularizer)

    global_step = tf.Variable(0, trainable=False)
    variable_average = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    variable_average_ops = variable_average.apply(tf.trainable_variables())

    # sparse_softmax_cross_entropy_with_logits expects class indices, so convert
    # the one-hot labels with argmax.
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y,
                                                                   labels=tf.argmax(y_, 1))
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    # Total loss = cross entropy + the L2 terms collected in interence().
    loss = cross_entropy_mean + tf.add_n(tf.get_collection('loss'))

    learning_rate = tf.train.exponential_decay(LEARNING_RATE_BASE, global_step,
                                               mnist.train.num_examples / BATCH_SIZE,
                                               LEARNING_RATE_DECAY)
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss,
                                                                           global_step=global_step)
    # Each training step both applies the gradients and updates the moving averages.
    train_op = tf.group(train_step, variable_average_ops)

    saver = tf.train.Saver()
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        for i in range(TRAIN_STEP):
            # The network expects input of shape [BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, CHANNEL],
            # so reshape the flat 784-pixel MNIST vectors before feeding them.
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            reshape_xs = np.reshape(xs, (BATCH_SIZE, mnist_interence.IMAGE_SIZE,
                                         mnist_interence.IMAGE_SIZE,
                                         mnist_interence.NUM_CHANNEL))
            _, loss_value, step, learn_rate = sess.run([train_op, loss, global_step, learning_rate],
                                                       feed_dict={x: reshape_xs, y_: ys})
            if i % 1000 == 0:
                print('After %d steps, loss on the training batch is %g, and the learning rate is %g'
                      % (step, loss_value, learn_rate))
                saver.save(sess, os.path.join(MODEL_PATH, MODEL_NAME), global_step=global_step)

def main():
    mnist = input_data.read_data_sets('../mni_data', one_hot=True)
    train(mnist)

if __name__ == '__main__':
    main()
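With staircase left at its default of False, tf.train.exponential_decay computes learning_rate = LEARNING_RATE_BASE * LEARNING_RATE_DECAY ^ (global_step / decay_steps), where decay_steps = mnist.train.num_examples / BATCH_SIZE = 55000 / 100 = 550, i.e. the rate shrinks by a factor of 0.99 once per pass over the training set. The same arithmetic in plain Python (the constants mirror the ones above):

LEARNING_RATE_BASE = 0.01
LEARNING_RATE_DECAY = 0.99
DECAY_STEPS = 55000 / 100.0  # one decay period per epoch

def decayed_rate(global_step):
    # base * decay ** (global_step / decay_steps), as exponential_decay
    # computes it with staircase=False
    return LEARNING_RATE_BASE * LEARNING_RATE_DECAY ** (global_step / DECAY_STEPS)

print(decayed_rate(0))     # 0.01
print(decayed_rate(5500))  # after 10 epochs: 0.01 * 0.99**10 ≈ 0.00904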
III. Evaluation
import time

import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

from mnist_cnn import mnist_interence
from mnist_cnn import mnist_train

EVAL_INTERVAL_SECS = 10

def evaluate(mnist):
    with tf.Graph().as_default():
        x = tf.placeholder(tf.float32,
                           shape=[None, mnist_interence.IMAGE_SIZE, mnist_interence.IMAGE_SIZE,
                                  mnist_interence.NUM_CHANNEL],
                           name='x-input')
        y_ = tf.placeholder(tf.float32, shape=[None, mnist_interence.OUTPUT_NODE], name='y-input')

        xs, ys = mnist.validation.images, mnist.validation.labels
        reshape_xs = np.reshape(xs, (-1, mnist_interence.IMAGE_SIZE,
                                     mnist_interence.IMAGE_SIZE,
                                     mnist_interence.NUM_CHANNEL))
        val_feed = {x: reshape_xs, y_: ys}

        # No dropout and no regularizer at evaluation time.
        y = mnist_interence.interence(x, False, None)
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        # Restore the moving-average (shadow) values of the weights, which
        # usually generalize a little better than the raw weights.
        variable_average = tf.train.ExponentialMovingAverage(mnist_train.MOVING_AVERAGE_DECAY)
        val_to_restore = variable_average.variables_to_restore()
        saver = tf.train.Saver(val_to_restore)

        while True:
            with tf.Session() as sess:
                ckpt = tf.train.get_checkpoint_state(mnist_train.MODEL_PATH)
                if ckpt and ckpt.model_checkpoint_path:
                    saver.restore(sess, ckpt.model_checkpoint_path)
                    # The global step is encoded in the checkpoint file name,
                    # e.g. 'model/model-3000'.
                    global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                    accuracy_score = sess.run(accuracy, feed_dict=val_feed)
                    print('After %s training steps, the validation accuracy is %g'
                          % (global_step, accuracy_score))
                else:
                    print('No checkpoint file found')
            time.sleep(EVAL_INTERVAL_SECS)

def main():
    mnist = input_data.read_data_sets('../mni_data', one_hot=True)
    evaluate(mnist)

if __name__ == '__main__':
    main()
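variables_to_restore() returns a dictionary mapping each variable's shadow name (e.g. 'layer5-fc1/w/ExponentialMovingAverage') to the variable itself, so the Saver loads the averaged values into the ordinary variables. A minimal sketch of the idea, outside this model (the variable name 'v' is illustrative):

import tensorflow as tf

v = tf.Variable(0.0, name='v')
ema = tf.train.ExponentialMovingAverage(0.99)
maintain_avg_op = ema.apply([v])

# Maps 'v/ExponentialMovingAverage' -> v, so restoring a checkpoint through
# this map writes the saved shadow value into v itself.
restore_map = ema.variables_to_restore()
saver = tf.train.Saver(restore_map)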
Finally, this implementation reaches an accuracy of roughly 99.4% on the MNIST validation set.