TensorFlow: Quickly Building a CNN


Quickly building a CNN
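The script below builds a small CNN for MNIST with the `tf.contrib.layers` wrappers (`convolution2d` and `fully_connected`): two 3x3 convolution layers (32 and 64 output channels), each followed by batch normalization and 2x2 max pooling, then a 512-unit fully connected layer with dropout and a 10-way output, trained with Adam on the softmax cross-entropy loss.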

```python
#!/usr/bin/python3
# -*- coding:utf-8 -*-
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.contrib.layers.python.layers import fully_connected, convolution2d
from tensorflow.examples.tutorials.mnist import input_data
# from tensorflow.python.framework import ops
# ops.reset_default_graph()

"""cnn"""
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

# hyperparameters
batch_size = 128
keep_prob = 0.7        # dropout keep probability
learn_rate = 0.001
num_steps = 100000
disp_step = 2000

with tf.Graph().as_default() as graph:
    # MNIST images are 28x28; labels are the 10 classes 0-9
    x = tf.placeholder(tf.float32, [None, 28 * 28 * 1])
    y_ = tf.placeholder(tf.float32, [None, 10])
    keep = tf.placeholder(tf.float32)
    x_img = tf.reshape(x, [-1, 28, 28, 1])

    # convolution 1
    """
    conv1 = tf.layers.conv2d(
        tf.image.convert_image_dtype(x_img, dtype=tf.float32),
        filters=32,            # output channels 1 -> 32
        kernel_size=(3, 3),    # 3x3 kernel
        activation=tf.nn.relu,
        padding='SAME',
        kernel_initializer=tf.random_uniform_initializer,
        bias_initializer=tf.random_normal_initializer
    )
    """
    conv1 = convolution2d(
        tf.image.convert_image_dtype(x_img, dtype=tf.float32),
        num_outputs=32,
        kernel_size=(3, 3),
        activation_fn=tf.nn.relu,
        normalizer_fn=tf.layers.batch_normalization,
        weights_initializer=tf.random_uniform_initializer,
        biases_initializer=tf.random_normal_initializer,
        trainable=True
    )
    conv1 = tf.nn.max_pool(conv1, [1, 2, 2, 1], [1, 2, 2, 1], padding="SAME")  # [n,14,14,32]

    # convolution 2
    """
    conv2 = tf.layers.conv2d(
        conv1,
        filters=64,            # output channels 32 -> 64
        kernel_size=(3, 3),    # 3x3 kernel
        activation=tf.nn.relu,
        padding='SAME',
        kernel_initializer=tf.random_uniform_initializer,
        bias_initializer=tf.random_normal_initializer
    )
    """
    conv2 = convolution2d(
        conv1,
        num_outputs=64,
        kernel_size=(3, 3),
        activation_fn=tf.nn.relu,
        normalizer_fn=tf.layers.batch_normalization,
        weights_initializer=tf.random_uniform_initializer,
        biases_initializer=tf.random_normal_initializer,
        trainable=True
    )
    conv2 = tf.nn.max_pool(conv2, [1, 2, 2, 1], [1, 2, 2, 1], padding="SAME")  # [n,7,7,64]

    # fully connected
    fc1 = tf.reshape(conv2, [-1, 7 * 7 * 64])
    fc1 = fully_connected(
        fc1,
        num_outputs=512,
        activation_fn=tf.nn.relu,
        normalizer_fn=tf.layers.batch_normalization,
        weights_initializer=tf.random_uniform_initializer,
        biases_initializer=tf.random_normal_initializer,
        weights_regularizer=tf.nn.l2_loss,
        biases_regularizer=tf.nn.l2_loss,
    )  # [N,512]
    fc1 = tf.nn.dropout(fc1, keep)

    # output layer: keep the raw logits, softmax_cross_entropy_with_logits applies softmax itself
    logits = fully_connected(
        fc1,
        num_outputs=10,
        activation_fn=None,
        normalizer_fn=tf.layers.batch_normalization,
        weights_initializer=tf.random_uniform_initializer,
        biases_initializer=tf.random_normal_initializer,
        weights_regularizer=tf.nn.l2_loss,
        biases_regularizer=tf.nn.l2_loss,
    )  # [N,10]
    y = tf.nn.softmax(logits)

    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=logits))
    train_op = tf.train.AdamOptimizer(learn_rate).minimize(loss)

    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    # calculate accuracy
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

sess = tf.InteractiveSession(graph=graph)
tf.global_variables_initializer().run()
for step in range(num_steps):
    batch_xs, batch_ys = mnist.train.next_batch(batch_size)
    train_op.run({x: batch_xs, y_: batch_ys, keep: keep_prob})
    if step % disp_step == 0:
        print("step", step,
              'acc', accuracy.eval({x: batch_xs, y_: batch_ys, keep: keep_prob}),
              'loss', loss.eval({x: batch_xs, y_: batch_ys, keep: keep_prob}))

# test accuracy
print('test acc', accuracy.eval({x: mnist.test.images, y_: mnist.test.labels, keep: 1.}))
sess.close()
```
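The commented-out calls above sketch the same layers with the plain `tf.layers` API. For reference, here is roughly what the full forward pass would look like in that style (a minimal sketch assuming TF 1.x and the same 28x28x1 input; the helper name `build_cnn` is only for illustration):

```python
# Hypothetical tf.layers version of the forward pass above (TF 1.x assumed).
def build_cnn(x_img, keep_prob):
    conv1 = tf.layers.conv2d(x_img, filters=32, kernel_size=3,
                             padding='same', activation=tf.nn.relu)
    pool1 = tf.layers.max_pooling2d(conv1, pool_size=2, strides=2,
                                    padding='same')            # [N,14,14,32]
    conv2 = tf.layers.conv2d(pool1, filters=64, kernel_size=3,
                             padding='same', activation=tf.nn.relu)
    pool2 = tf.layers.max_pooling2d(conv2, pool_size=2, strides=2,
                                    padding='same')            # [N,7,7,64]
    flat = tf.layers.flatten(pool2)
    fc1 = tf.layers.dense(flat, 512, activation=tf.nn.relu)
    fc1 = tf.nn.dropout(fc1, keep_prob)
    logits = tf.layers.dense(fc1, 10)   # raw logits; softmax is applied by the loss
    return logits
```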

Result:

(screenshot of the training log)

Alternative approach:

```python
#!/usr/bin/python3
# -*- coding:utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.contrib.layers.python.layers import batch_norm
from tensorflow.examples.tutorials.mnist import input_data
# from tensorflow.python.framework import ops
# ops.reset_default_graph()

"""cnn"""
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

# hyperparameters
batch_size = 128
keep_prob = 0.7        # dropout keep probability
learn_rate = 0.1
num_steps = 10
disp_step = 2

with tf.Graph().as_default() as graph:
    # MNIST images are 28x28; labels are the 10 classes 0-9
    x = tf.placeholder(tf.float32, [None, 28 * 28 * 1])
    y_ = tf.placeholder(tf.float32, [None, 10])
    keep = tf.placeholder(tf.float32)
    is_training = tf.placeholder(tf.bool, name='MODE')
    x_img = tf.reshape(x, [-1, 28, 28, 1])

    def batch_norm_layer(inputT, is_training=True, scope=None):
        # Note: is_training is a tf.placeholder(tf.bool); the training branch
        # creates the batch-norm variables, the inference branch reuses them.
        return tf.cond(is_training,
                       lambda: batch_norm(inputT, is_training=True,
                                          center=True, scale=True, activation_fn=tf.nn.relu,
                                          decay=0.9, scope=scope),
                       lambda: batch_norm(inputT, is_training=False,
                                          center=True, scale=True, activation_fn=tf.nn.relu,
                                          decay=0.9, scope=scope, reuse=True))

    def conv2d(input, kernel_size, input_size, output_size, is_training, name):
        with tf.name_scope(name) as scope:
            with tf.variable_scope(name):
                # scope.reuse_variables()
                w = tf.get_variable('w', [kernel_size, kernel_size, input_size, output_size], tf.float32,
                                    initializer=tf.random_uniform_initializer) * 0.001
                b = tf.get_variable('b', [output_size], tf.float32,
                                    initializer=tf.random_normal_initializer) + 0.1
                conv = tf.nn.conv2d(input, w, [1, 1, 1, 1], padding="SAME")
                conv = tf.nn.bias_add(conv, b)
                conv = batch_norm_layer(conv, is_training, scope)
                conv = tf.nn.relu(conv)
        return conv

    def fc_layer(input, input_size, output_size, is_training, name):
        with tf.name_scope(name) as scope:
            with tf.variable_scope(name):
                w = tf.get_variable('w', [input_size, output_size], tf.float32,
                                    initializer=tf.random_uniform_initializer) * 0.001
                b = tf.get_variable('b', [output_size], tf.float32,
                                    initializer=tf.random_normal_initializer) + 0.1
                fc = tf.nn.bias_add(tf.matmul(input, w), b)
                fc = batch_norm_layer(fc, is_training, scope)
                # fc = tf.nn.relu(fc)
                return fc

    # convolution 1
    conv1 = conv2d(tf.image.convert_image_dtype(x_img, tf.float32),
                   3, 1, 32, is_training, 'conv1')
    conv1 = tf.nn.max_pool(conv1, [1, 2, 2, 1], [1, 2, 2, 1], padding="SAME")  # [n,14,14,32]
    conv1 = tf.nn.lrn(conv1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm1')
    conv1 = tf.nn.dropout(conv1, keep)

    # convolution 2
    conv2 = conv2d(conv1,
                   3, 32, 64, is_training, 'conv2')
    conv2 = tf.nn.max_pool(conv2, [1, 2, 2, 1], [1, 2, 2, 1], padding="SAME")  # [n,7,7,64]
    conv2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm2')
    conv2 = tf.nn.dropout(conv2, keep)

    # fully connected
    fc1 = tf.reshape(conv2, [-1, 7 * 7 * 64])
    fc1 = fc_layer(fc1, 7 * 7 * 64, 512, is_training, 'fc1')
    fc1 = tf.nn.relu(fc1)
    fc1 = tf.nn.dropout(fc1, keep)

    # output layer: fc2 holds the raw logits, softmax_cross_entropy_with_logits applies softmax itself
    fc2 = fc_layer(fc1, 512, 10, is_training, 'output')
    y = tf.nn.softmax(fc2)

    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=fc2))
    train_op = tf.train.AdamOptimizer(learn_rate).minimize(loss)

    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    # calculate accuracy
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

sess = tf.InteractiveSession(graph=graph)
tf.global_variables_initializer().run()
for step in range(num_steps):
    batch_xs, batch_ys = mnist.train.next_batch(batch_size)
    train_op.run({x: batch_xs, y_: batch_ys, keep: keep_prob, is_training: True})
    if step % disp_step == 0:
        print("step", step,
              'acc', accuracy.eval({x: batch_xs, y_: batch_ys, keep: keep_prob, is_training: True}),
              'loss', loss.eval({x: batch_xs, y_: batch_ys, keep: keep_prob, is_training: True}))

# test accuracy
print('test acc', accuracy.eval({x: mnist.test.images, y_: mnist.test.labels, keep: 1., is_training: False}))
sess.close()
```
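On newer TensorFlow versions the same architecture can also be expressed with `tf.keras`, which handles the batch-norm training/inference switch and variable reuse automatically. The following is only a rough sketch under that assumption, mirroring the layer sizes used above (the Keras dropout rate is the drop probability, i.e. 1 - keep probability):

```python
# Rough tf.keras equivalent of the architecture above (assumes tf.keras is available).
from tensorflow import keras

model = keras.Sequential([
    keras.layers.Reshape((28, 28, 1), input_shape=(784,)),
    keras.layers.Conv2D(32, 3, padding='same', activation='relu'),
    keras.layers.BatchNormalization(),
    keras.layers.MaxPooling2D(2),
    keras.layers.Dropout(0.3),                 # rate = 1 - keep_prob (0.7)
    keras.layers.Conv2D(64, 3, padding='same', activation='relu'),
    keras.layers.BatchNormalization(),
    keras.layers.MaxPooling2D(2),
    keras.layers.Dropout(0.3),
    keras.layers.Flatten(),
    keras.layers.Dense(512, activation='relu'),
    keras.layers.Dropout(0.3),
    keras.layers.Dense(10, activation='softmax'),
])
model.compile(optimizer='adam',
              loss='categorical_crossentropy',  # labels are one-hot
              metrics=['accuracy'])
```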