3 Convolutional Neural Networks for MNIST - 3.3 Training and Evaluation Session for a Simple Convolutional Neural Network


Running the computation graph

  1. Load the dataset
  2. Launch a session
  3. Train the model epoch by epoch
    3.1 Within each epoch, feed the data in multiple batches
      3.1.1 Run the training node on each batch to train the model
      3.1.2 After every few batches, evaluate the current model: compute the loss and accuracy on the training and validation sets
  4. Evaluate the final model on the test set: loss and accuracy
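Before walking through the full program, here is a minimal, self-contained sketch of these four steps. It is only a sketch: to keep the session mechanics in focus it trains a plain softmax regression instead of the convolutional network, evaluates once per epoch rather than every few batches, and uses illustrative hyperparameter values.

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets('MNIST_data/', one_hot=True)        # 1. load the dataset

# A deliberately tiny model (softmax regression) so the run loop stands out
x = tf.placeholder(tf.float32, [None, 784])
y_true = tf.placeholder(tf.float32, [None, 10])
logits = tf.layers.dense(x, 10)
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_true, logits=logits))
train_op = tf.train.AdamOptimizer(0.001).minimize(loss)
accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(logits, 1), tf.argmax(y_true, 1)), tf.float32))

with tf.Session() as sess:                                            # 2. launch a session
    sess.run(tf.global_variables_initializer())
    for epoch in range(1):                                            # 3. train epoch by epoch
        for _ in range(mnist.train.num_examples // 100):              # 3.1 feed data in batches
            batch_x, batch_y = mnist.train.next_batch(100)
            sess.run(train_op, feed_dict={x: batch_x, y_true: batch_y})   # 3.1.1 run the training node
        # 3.1.2 evaluate periodically (here: once per epoch, on the validation set)
        print(sess.run([loss, accuracy], feed_dict={x: mnist.validation.images,
                                                    y_true: mnist.validation.labels}))
    # 4. evaluate the final model on the test set
    print(sess.run([loss, accuracy], feed_dict={x: mnist.test.images,
                                                y_true: mnist.test.labels}))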
# 3.1 A simple convolutional neural network for classifying MNIST: conv2d + activation + pool + fc
import csv
import os

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

# Hyperparameters
learning_rate_init = 0.001
training_epochs = 1
batch_size = 100
display_step = 10

# Network parameters
n_input = 784  # MNIST data input (img shape: 28*28)
n_class = 10   # MNIST total classes (0-9 digits)

# Return an initialized weight Variable with the given shape and name
def WeightsVariable(shape, name_str, stddev=0.1):
    initial = tf.random_normal(shape=shape, stddev=stddev, dtype=tf.float32)
    return tf.Variable(initial, dtype=tf.float32, name=name_str)

# Return an initialized bias Variable with the given shape and name
def BiasesVariable(shape, name_str, stddev=0.00001):
    initial = tf.random_normal(shape=shape, stddev=stddev, dtype=tf.float32)
    return tf.Variable(initial, dtype=tf.float32, name=name_str)

# 2-D convolution layer (conv2d + bias)
def Conv2d(x, W, b, stride=1, padding='SAME'):
    with tf.name_scope('Wx_b'):
        y = tf.nn.conv2d(x, W, strides=[1, stride, stride, 1], padding=padding)
        y = tf.nn.bias_add(y, b)
    return y

# Nonlinear activation layer
def Activation(x, activation=tf.nn.relu, name='relu'):
    with tf.name_scope(name):
        y = activation(x)
    return y

# 2-D pooling layer
def Pool2d(x, pool=tf.nn.max_pool, k=2, stride=2):
    return pool(x, ksize=[1, k, k, 1], strides=[1, stride, stride, 1], padding='VALID')

# Fully connected layer: activate(Wx + b)
def FullyConnected(x, W, b, activate=tf.nn.relu, act_name='relu'):
    with tf.name_scope('Wx_b'):
        y = tf.matmul(x, W)
        y = tf.add(y, b)
    with tf.name_scope(act_name):
        y = activate(y)
    return y

# Generic evaluation function: compute the model's loss and accuracy on a given dataset
def EvaluateModelOnDataset(sess, images, labels):
    n_samples = images.shape[0]
    per_batch_size = 100
    loss = 0
    acc = 0
    # Evaluate small datasets in one pass; split large ones into batches so memory is not exhausted
    if n_samples <= per_batch_size:
        batch_count = 1
        loss, acc = sess.run([cross_entropy_loss, accuracy],
                             feed_dict={X_origin: images, Y_true: labels})
    else:
        batch_count = int(n_samples / per_batch_size)
        batch_start = 0
        for idx in range(batch_count):
            batch_loss, batch_acc = sess.run(
                [cross_entropy_loss, accuracy],
                feed_dict={X_origin: images[batch_start:batch_start + per_batch_size, :],
                           Y_true: labels[batch_start:batch_start + per_batch_size, :]})
            batch_start += per_batch_size
            # Accumulate the loss and accuracy over all batches
            loss += batch_loss
            acc += batch_acc
    # Return the averages
    return loss / batch_count, acc / batch_count

# Build the computation graph using the helpers above
with tf.Graph().as_default():
    # Graph inputs
    with tf.name_scope('Inputs'):
        X_origin = tf.placeholder(tf.float32, [None, n_input], name='X_origin')
        Y_true = tf.placeholder(tf.float32, [None, n_class], name='Y_true')
        # Reshape the image data from an N x 784 tensor into an N x 28 x 28 x 1 tensor
        X_image = tf.reshape(X_origin, [-1, 28, 28, 1])

    # Forward inference
    with tf.name_scope('Inference'):
        # First convolution layer (conv2d + bias)
        with tf.name_scope('Conv2d'):
            weights = WeightsVariable(shape=[5, 5, 1, 16], name_str='weights')
            biases = BiasesVariable(shape=[16], name_str='biases')
            conv_out = Conv2d(X_image, weights, biases, stride=1, padding='VALID')
        # Nonlinear activation layer
        with tf.name_scope('Activate'):
            activate_out = Activation(conv_out, activation=tf.nn.relu, name='relu')
        # First pooling layer (2-D max pooling)
        with tf.name_scope('Pool2d'):
            pool_out = Pool2d(activate_out, pool=tf.nn.max_pool, k=2, stride=2)
        # Flatten the 2-D feature maps into a 1-D feature vector
        with tf.name_scope('FeatsReshape'):
            features = tf.reshape(pool_out, [-1, 12 * 12 * 16])
        # First fully connected layer (linear, producing the logits)
        with tf.name_scope('FC_Linear'):
            weights = WeightsVariable(shape=[12 * 12 * 16, n_class], name_str='weights')
            biases = BiasesVariable(shape=[n_class], name_str='biases')
            Ypred_logits = FullyConnected(features, weights, biases,
                                          activate=tf.identity, act_name='identity')

    # Loss layer
    with tf.name_scope('Loss'):
        cross_entropy_loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(labels=Y_true, logits=Ypred_logits))

    # Training layer
    with tf.name_scope('Train'):
        learning_rate = tf.placeholder(tf.float32)
        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
        trainer = optimizer.minimize(cross_entropy_loss)

    # Evaluation layer
    with tf.name_scope('Evaluate'):
        correct_pred = tf.equal(tf.argmax(Ypred_logits, 1), tf.argmax(Y_true, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

    # Initializer node for all variables
    init = tf.global_variables_initializer()

    print('Writing the graph to an event file; view it in TensorBoard')
    summary_writer = tf.summary.FileWriter(logdir='logs', graph=tf.get_default_graph())
    summary_writer.close()

    # Load the dataset
    mnist = input_data.read_data_sets('MNIST_data/', one_hot=True)

    # Collect the evaluation results so they can be saved to a file
    results_list = list()
    # Record the hyperparameter configuration
    results_list.append(['learning_rate', learning_rate_init,
                         'training_epochs', training_epochs,
                         'batch_size', batch_size,
                         'display_step', display_step])
    results_list.append(['train_step', 'train_loss', 'validation_loss',
                         'train_step', 'train_accuracy', 'validation_accuracy'])

    with tf.Session() as sess:
        sess.run(init)
        total_batches = int(mnist.train.num_examples / batch_size)
        print("Per batch Size :", batch_size)
        print("Train sample Count:", mnist.train.num_examples)
        print("Total batch Count :", total_batches)

        training_step = 0  # number of training steps taken so far
        # Train for the given number of epochs; each epoch passes over every training sample once
        for epoch in range(training_epochs):
            # Every epoch runs through all batches
            for batch_idx in range(total_batches):
                # Fetch a batch of data
                batch_x, batch_y = mnist.train.next_batch(batch_size)
                # Run the Train node (backprop)
                sess.run(trainer, feed_dict={X_origin: batch_x,
                                             Y_true: batch_y,
                                             learning_rate: learning_rate_init})
                # Incremented once per training step; ends at training_epochs * total_batches
                training_step += 1
                # Every display_step steps, compute the current model's loss and accuracy
                if training_step % display_step == 0:
                    # Loss and accuracy on the display_step most recent training batches
                    start_idx = max(0, (batch_idx - display_step) * batch_size)
                    end_idx = batch_idx * batch_size
                    train_loss, train_acc = EvaluateModelOnDataset(
                        sess,
                        mnist.train.images[start_idx:end_idx, :],
                        mnist.train.labels[start_idx:end_idx, :])
                    print("Training Step:" + str(training_step) +
                          ",Training Loss = " + "{:.6f}".format(train_loss) +
                          ",Training Accuracy = " + "{:.5f}".format(train_acc))

                    # Loss and accuracy on the validation set. (The original script fed
                    # mnist.train here by mistake, which is what the sample output below
                    # reflects; the comments and CSV header intend the validation set.)
                    validation_loss, validation_acc = EvaluateModelOnDataset(
                        sess, mnist.validation.images, mnist.validation.labels)
                    print("Training Step:" + str(training_step) +
                          ",Validation Loss = " + "{:.6f}".format(validation_loss) +
                          ",Validation Accuracy = " + "{:.5f}".format(validation_acc))

                    # Save the evaluation results
                    results_list.append([training_step, train_loss, validation_loss,
                                         training_step, train_acc, validation_acc])

        print("Training finished!")

        # Loss and accuracy on the test set
        test_samples_count = mnist.test.num_examples
        test_loss, test_accuracy = EvaluateModelOnDataset(sess, mnist.test.images, mnist.test.labels)
        print("Testing Samples Count:", test_samples_count)
        print("Testing Loss:", test_loss)
        print("Testing Accuracy:", test_accuracy)
        results_list.append(['test step', 'loss', test_loss, 'accuracy', test_accuracy])

        # Write the evaluation results to a CSV file
        results_file = open('evaluate_results.csv', 'w', newline='')
        csv_writer = csv.writer(results_file, dialect='excel')
        for row in results_list:
            csv_writer.writerow(row)
        results_file.close()
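One detail in the code above worth verifying is the [-1, 12*12*16] shape in the FeatsReshape step. With padding='VALID', the 5x5 convolution shrinks each 28x28 image to 24x24 (28 - 5 + 1 = 24), and the 2x2 max pooling with stride 2 halves that to 12x12 over the 16 feature maps, so every image flattens to 12 * 12 * 16 = 2304 features. A quick sketch of the arithmetic (the helper names here are mine, for illustration only):

# Output sizes of 'VALID' (unpadded) convolution and pooling layers;
# these helpers are illustrative and not part of the program above.
def conv_output_size(in_size, kernel_size, stride=1):
    return (in_size - kernel_size) // stride + 1

def pool_output_size(in_size, window, stride):
    return (in_size - window) // stride + 1

side = conv_output_size(28, 5)        # 24
side = pool_output_size(side, 2, 2)   # 12
print(side * side * 16)               # 2304 == 12 * 12 * 16

The FileWriter call also dumps the graph definition into the logs/ directory; running tensorboard --logdir logs and opening the address it prints shows the Inputs, Inference, Loss, Train and Evaluate name scopes as collapsible nodes.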

Output:

Writing the graph to an event file; view it in TensorBoard
Extracting MNIST_data/train-images-idx3-ubyte.gz
Extracting MNIST_data/train-labels-idx1-ubyte.gz
Extracting MNIST_data/t10k-images-idx3-ubyte.gz
Extracting MNIST_data/t10k-labels-idx1-ubyte.gz
Per batch Size : 100
Train sample Count: 55000
Total batch Count : 550
Training Step:10,Training Loss = 1.756672,Training Accuracy = 0.50778
Training Step:10,Validation Loss = 1.772081,Validation Accuracy = 0.50700
Training Step:20,Training Loss = 1.293760,Training Accuracy = 0.69700
Training Step:20,Validation Loss = 1.319923,Validation Accuracy = 0.68671
Training Step:30,Training Loss = 0.896758,Training Accuracy = 0.76100
Training Step:30,Validation Loss = 0.980806,Validation Accuracy = 0.73382
Training Step:40,Training Loss = 0.747110,Training Accuracy = 0.78800
Training Step:40,Validation Loss = 0.752963,Validation Accuracy = 0.78933
Training Step:50,Training Loss = 0.619199,Training Accuracy = 0.83400
Training Step:50,Validation Loss = 0.618799,Validation Accuracy = 0.82218
Training Step:60,Training Loss = 0.486959,Training Accuracy = 0.86900
Training Step:60,Validation Loss = 0.536908,Validation Accuracy = 0.84445
Training Step:70,Training Loss = 0.448975,Training Accuracy = 0.87500
Training Step:70,Validation Loss = 0.477645,Validation Accuracy = 0.86504
Training Step:80,Training Loss = 0.421686,Training Accuracy = 0.89500
Training Step:80,Validation Loss = 0.443736,Validation Accuracy = 0.87204
Training Step:90,Training Loss = 0.419851,Training Accuracy = 0.88100
Training Step:90,Validation Loss = 0.421143,Validation Accuracy = 0.87718
Training Step:100,Training Loss = 0.360560,Training Accuracy = 0.89900
Training Step:100,Validation Loss = 0.392523,Validation Accuracy = 0.88753
Training Step:110,Training Loss = 0.365864,Training Accuracy = 0.89500
Training Step:110,Validation Loss = 0.369761,Validation Accuracy = 0.89407
Training Step:120,Training Loss = 0.322990,Training Accuracy = 0.90100
Training Step:120,Validation Loss = 0.359142,Validation Accuracy = 0.89620
Training Step:130,Training Loss = 0.287648,Training Accuracy = 0.92700
Training Step:130,Validation Loss = 0.348993,Validation Accuracy = 0.89876
Training Step:140,Training Loss = 0.305304,Training Accuracy = 0.91500
Training Step:140,Validation Loss = 0.343317,Validation Accuracy = 0.89653
Training Step:150,Training Loss = 0.329766,Training Accuracy = 0.91000
Training Step:150,Validation Loss = 0.324283,Validation Accuracy = 0.90616
Training Step:160,Training Loss = 0.321841,Training Accuracy = 0.90800
Training Step:160,Validation Loss = 0.313210,Validation Accuracy = 0.91038
Training Step:170,Training Loss = 0.311214,Training Accuracy = 0.90600
Training Step:170,Validation Loss = 0.307121,Validation Accuracy = 0.91042
Training Step:180,Training Loss = 0.275085,Training Accuracy = 0.93300
Training Step:180,Validation Loss = 0.296871,Validation Accuracy = 0.91513
Training Step:190,Training Loss = 0.261072,Training Accuracy = 0.92800
Training Step:190,Validation Loss = 0.287202,Validation Accuracy = 0.91722
Training Step:200,Training Loss = 0.292294,Training Accuracy = 0.92600
Training Step:200,Validation Loss = 0.285140,Validation Accuracy = 0.91825
Training Step:210,Training Loss = 0.251950,Training Accuracy = 0.92700
Training Step:210,Validation Loss = 0.276896,Validation Accuracy = 0.91811
Training Step:220,Training Loss = 0.265602,Training Accuracy = 0.92500
Training Step:220,Validation Loss = 0.277241,Validation Accuracy = 0.91967
Training Step:230,Training Loss = 0.227735,Training Accuracy = 0.93200
Training Step:230,Validation Loss = 0.264252,Validation Accuracy = 0.92404
Training Step:240,Training Loss = 0.249425,Training Accuracy = 0.92800
Training Step:240,Validation Loss = 0.256011,Validation Accuracy = 0.92555
Training Step:250,Training Loss = 0.245613,Training Accuracy = 0.92700
Training Step:250,Validation Loss = 0.252154,Validation Accuracy = 0.92755
Training Step:260,Training Loss = 0.244577,Training Accuracy = 0.92700
Training Step:260,Validation Loss = 0.255327,Validation Accuracy = 0.92745
Training Step:270,Training Loss = 0.197836,Training Accuracy = 0.94400
Training Step:270,Validation Loss = 0.244259,Validation Accuracy = 0.93122
Training Step:280,Training Loss = 0.219606,Training Accuracy = 0.93400
Training Step:280,Validation Loss = 0.235115,Validation Accuracy = 0.93300
Training Step:290,Training Loss = 0.254273,Training Accuracy = 0.92900
Training Step:290,Validation Loss = 0.234082,Validation Accuracy = 0.93320
Training Step:300,Training Loss = 0.224192,Training Accuracy = 0.93900
Training Step:300,Validation Loss = 0.228814,Validation Accuracy = 0.93465
Training Step:310,Training Loss = 0.223901,Training Accuracy = 0.93800
Training Step:310,Validation Loss = 0.224006,Validation Accuracy = 0.93533
Training Step:320,Training Loss = 0.223280,Training Accuracy = 0.93700
Training Step:320,Validation Loss = 0.233053,Validation Accuracy = 0.93336
Training Step:330,Training Loss = 0.210525,Training Accuracy = 0.93900
Training Step:330,Validation Loss = 0.229443,Validation Accuracy = 0.93311
Training Step:340,Training Loss = 0.200281,Training Accuracy = 0.95100
Training Step:340,Validation Loss = 0.211338,Validation Accuracy = 0.94042
Training Step:350,Training Loss = 0.185655,Training Accuracy = 0.94800
Training Step:350,Validation Loss = 0.207397,Validation Accuracy = 0.94004
Training Step:360,Training Loss = 0.176789,Training Accuracy = 0.95300
Training Step:360,Validation Loss = 0.213556,Validation Accuracy = 0.93793
Training Step:370,Training Loss = 0.194035,Training Accuracy = 0.95100
Training Step:370,Validation Loss = 0.209075,Validation Accuracy = 0.94058
Training Step:380,Training Loss = 0.177737,Training Accuracy = 0.94600
Training Step:380,Validation Loss = 0.199079,Validation Accuracy = 0.94295
Training Step:390,Training Loss = 0.189781,Training Accuracy = 0.94700
Training Step:390,Validation Loss = 0.196476,Validation Accuracy = 0.94389
Training Step:400,Training Loss = 0.157569,Training Accuracy = 0.95400
Training Step:400,Validation Loss = 0.190233,Validation Accuracy = 0.94669
Training Step:410,Training Loss = 0.193387,Training Accuracy = 0.94700
Training Step:410,Validation Loss = 0.192428,Validation Accuracy = 0.94595
Training Step:420,Training Loss = 0.167326,Training Accuracy = 0.94900
Training Step:420,Validation Loss = 0.183852,Validation Accuracy = 0.94784
Training Step:430,Training Loss = 0.169627,Training Accuracy = 0.94900
Training Step:430,Validation Loss = 0.184817,Validation Accuracy = 0.94831
Training Step:440,Training Loss = 0.142650,Training Accuracy = 0.96000
Training Step:440,Validation Loss = 0.179975,Validation Accuracy = 0.94815
Training Step:450,Training Loss = 0.165104,Training Accuracy = 0.95400
Training Step:450,Validation Loss = 0.178593,Validation Accuracy = 0.94971
Training Step:460,Training Loss = 0.166802,Training Accuracy = 0.95200
Training Step:460,Validation Loss = 0.184537,Validation Accuracy = 0.94616
Training Step:470,Training Loss = 0.168617,Training Accuracy = 0.94400
Training Step:470,Validation Loss = 0.170933,Validation Accuracy = 0.95007
Training Step:480,Training Loss = 0.167679,Training Accuracy = 0.95600
Training Step:480,Validation Loss = 0.174422,Validation Accuracy = 0.94875
Training Step:490,Training Loss = 0.181939,Training Accuracy = 0.95200
Training Step:490,Validation Loss = 0.165436,Validation Accuracy = 0.95280
Training Step:500,Training Loss = 0.144874,Training Accuracy = 0.96800
Training Step:500,Validation Loss = 0.164057,Validation Accuracy = 0.95333
Training Step:510,Training Loss = 0.148213,Training Accuracy = 0.96100
Training Step:510,Validation Loss = 0.160180,Validation Accuracy = 0.95447
Training Step:520,Training Loss = 0.175561,Training Accuracy = 0.95000
Training Step:520,Validation Loss = 0.160395,Validation Accuracy = 0.95522
Training Step:530,Training Loss = 0.160942,Training Accuracy = 0.96100
Training Step:530,Validation Loss = 0.162960,Validation Accuracy = 0.95365
Training Step:540,Training Loss = 0.156576,Training Accuracy = 0.95900
Training Step:540,Validation Loss = 0.161946,Validation Accuracy = 0.95300
Training Step:550,Training Loss = 0.141174,Training Accuracy = 0.96200
Training Step:550,Validation Loss = 0.155826,Validation Accuracy = 0.95633
Training finished!
Testing Samples Count: 10000
Testing Loss: 0.146195202125
Testing Accuracy: 0.958600003719
Process finished with exit code 0
Contents of evaluate_results.csv (the same values as the log above, at full precision; the middle rows are elided here):

learning_rate,0.001,training_epochs,1,batch_size,100,display_step,10
train_step,train_loss,validation_loss,train_step,train_accuracy,validation_accuracy
10,1.75667169359,1.77208096071,10,0.50777777036,0.506999996142
20,1.29376046658,1.31992321231,20,0.697000008821,0.686709092855
30,0.896758031845,0.980805812966,30,0.761000001431,0.733818182945
40,0.747110408545,0.752963353612,40,0.787999987602,0.789327270009
50,0.619198989868,0.618798954758,50,0.833999997377,0.822181816751
...
540,0.156576392055,0.161946139187,540,0.959000003338,0.953000001907
550,0.141173601896,0.155825799751,550,0.962000006437,0.956327274171
test step,loss,0.146195202125,accuracy,0.958600003719

Open evaluate_results.csv and build charts from it; the steps below use WPS.

Select the first three columns:
[screenshot]

Insert a line chart:
[screenshot]

After inserting:
[screenshot]

Edit the chart: click "Chart Tools → Select Data".
[screenshot]

[screenshot]

In the Series list, clear the check marks in front of the train_step entries, because the train_step values will serve as the x-axis.
The chart now looks like this:
[screenshot]

Next, change the horizontal axis so that the train_step values supply the x-axis data.

Click the Edit button for the category entry under the axis labels:
[screenshot]

Select all of the data in the train_step column:
[screenshot]

The chart after the change:
[screenshot]

Create the second chart (the accuracy curves) in the same way:
[screenshot]
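The spreadsheet steps above can also be replaced by a short script. Here is a minimal sketch that draws both charts from evaluate_results.csv using matplotlib (assumed to be installed); it relies on the file layout written by the program above: one configuration row, one header row, the per-step rows, and a final test row.

import csv
import matplotlib.pyplot as plt

# Read evaluate_results.csv, skipping the config row, the header row and
# the final test row; each remaining row is
# [train_step, train_loss, validation_loss, train_step, train_accuracy, validation_accuracy]
with open('evaluate_results.csv') as f:
    rows = list(csv.reader(f))
data = [row for row in rows[2:-1] if row]   # 'if row' drops any blank lines

steps = [int(r[0]) for r in data]
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4))

# Chart 1: loss curves
ax1.plot(steps, [float(r[1]) for r in data], label='train_loss')
ax1.plot(steps, [float(r[2]) for r in data], label='validation_loss')
ax1.set_xlabel('train_step')
ax1.legend()

# Chart 2: accuracy curves
ax2.plot(steps, [float(r[4]) for r in data], label='train_accuracy')
ax2.plot(steps, [float(r[5]) for r in data], label='validation_accuracy')
ax2.set_xlabel('train_step')
ax2.legend()

plt.show()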
