tensorflow Examples <2>: Implementing a Convolutional Neural Network

An example of implementing a simple convolutional neural network with TensorFlow: two convolution + 2x2 max-pooling stages, a 1024-unit fully connected layer with dropout, and a softmax output layer, trained on MNIST.
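Before diving in, it helps to know what `input_data.read_data_sets` actually hands back. A quick check of my own (not part of the original post; the shapes are the standard splits produced by this helper):

from tensorflow.examples.tutorials.mnist import input_data

# Download (if needed) and load MNIST with one-hot encoded labels
mnist = input_data.read_data_sets('MNIST_data/', one_hot=True)

print(mnist.train.images.shape)   # (55000, 784)  flattened 28x28 grayscale images
print(mnist.train.labels.shape)   # (55000, 10)   one-hot digit labels
print(mnist.test.images.shape)    # (10000, 784)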

#coding: utf-8
'''
os: windows 64
env: python 3.6
tensorflow: 1.1.0
ide: jupyter notebook
'''
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf

mnist = input_data.read_data_sets('MNIST_data/', one_hot=True)
sess = tf.InteractiveSession()

def weight_variable(shape):
    # Small truncated-normal noise to break symmetry between units
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

def bias_variable(shape):
    # Small positive bias to avoid dead ReLU units
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)

def conv2d(x, W):
    # Stride-1 convolution with SAME padding keeps the spatial size unchanged
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding="SAME")

def max_pool_2x2(x):
    # 2x2 max pooling with stride 2 halves the spatial size
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")

x = tf.placeholder(tf.float32, [None, 784])
y_ = tf.placeholder(tf.float32, [None, 10])
x_image = tf.reshape(x, [-1, 28, 28, 1])

# First convolutional layer: 32 feature maps with 5x5 kernels
W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)   # 28x28 -> 14x14

# Second convolutional layer: 64 feature maps with 5x5 kernels
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)   # 14x14 -> 7x7

# Fully connected layer on the flattened 7*7*64 feature map
W_fc1 = weight_variable([7*7*64, 1024])
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)

# Dropout between the fully connected layer and the output layer
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

# Softmax output layer over the 10 digit classes
W_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])
y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)

# Cross-entropy loss and Adam optimizer
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y_conv), reduction_indices=[1]))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

tf.global_variables_initializer().run()
for i in range(20000):
    batch = mnist.train.next_batch(50)
    if i % 100 == 0:
        train_accuracy = accuracy.eval(feed_dict={x: batch[0],
                                                  y_: batch[1],
                                                  keep_prob: 1.0})
        print("Step %05d, training accuracy %g" % (i, train_accuracy))
    train_step.run(feed_dict={x: batch[0],
                              y_: batch[1],
                              keep_prob: 0.5})

print('test accuracy %g' % accuracy.eval(feed_dict={x: mnist.test.images,
                                                    y_: mnist.test.labels,
                                                    keep_prob: 1.0}))
sess.close()
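One caveat about the loss above: applying `tf.nn.softmax` and then `tf.log` by hand can hit `log(0)` and produce NaNs once the network becomes confident. A more numerically stable variant (a sketch of my own, reusing the variable names from the code above) keeps the last layer as raw logits and uses `tf.nn.softmax_cross_entropy_with_logits`, the same op the second example below relies on:

# Sketch: numerically stable loss. h_fc1_drop, W_fc2, b_fc2 and y_ are the
# tensors defined in the example above.
logits = tf.matmul(h_fc1_drop, W_fc2) + b_fc2        # unnormalized class scores
y_conv = tf.nn.softmax(logits)                       # probabilities, for predictions only

# softmax + log + cross-entropy fused into one numerically stable op
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=logits))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)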

Another style, which wraps the layers in helper functions and a model-building function:

    import tensorflow as tf

    # Import MNIST data
    from tensorflow.examples.tutorials.mnist import input_data
    mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

    # Parameters
    learning_rate = 0.001
    training_iters = 200000
    batch_size = 128
    display_step = 10

    # Network Parameters
    n_input = 784  # MNIST data input (img shape: 28*28)
    n_classes = 10  # MNIST total classes (0-9 digits)
    dropout = 0.75  # Dropout, probability to keep units

    # tf Graph input
    x = tf.placeholder(tf.float32, [None, n_input])
    y = tf.placeholder(tf.float32, [None, n_classes])
    keep_prob = tf.placeholder(tf.float32)  # dropout (keep probability)

    # Create some wrappers for simplicity
    def conv2d(x, W, b, strides=1):
        # Conv2D wrapper, with bias and relu activation
        x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME')
        x = tf.nn.bias_add(x, b)
        return tf.nn.relu(x)

    def maxpool2d(x, k=2):
        # MaxPool2D wrapper
        return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1],
                              padding='SAME')

    # Create model
    def conv_net(x, weights, biases, dropout):
        # Reshape input picture
        x = tf.reshape(x, shape=[-1, 28, 28, 1])

        # Convolution Layer
        conv1 = conv2d(x, weights['wc1'], biases['bc1'])
        # Max Pooling (down-sampling)
        conv1 = maxpool2d(conv1, k=2)

        # Convolution Layer
        conv2 = conv2d(conv1, weights['wc2'], biases['bc2'])
        # Max Pooling (down-sampling)
        conv2 = maxpool2d(conv2, k=2)

        # Fully connected layer
        # Reshape conv2 output to fit fully connected layer input
        fc1 = tf.reshape(conv2, [-1, weights['wd1'].get_shape().as_list()[0]])
        fc1 = tf.add(tf.matmul(fc1, weights['wd1']), biases['bd1'])
        fc1 = tf.nn.relu(fc1)
        # Apply Dropout
        fc1 = tf.nn.dropout(fc1, dropout)

        # Output, class prediction
        out = tf.add(tf.matmul(fc1, weights['out']), biases['out'])
        return out

    # Store layers weight & bias
    weights = {
        # 5x5 conv, 1 input, 32 outputs
        'wc1': tf.Variable(tf.random_normal([5, 5, 1, 32])),
        # 5x5 conv, 32 inputs, 64 outputs
        'wc2': tf.Variable(tf.random_normal([5, 5, 32, 64])),
        # fully connected, 7*7*64 inputs, 1024 outputs
        'wd1': tf.Variable(tf.random_normal([7*7*64, 1024])),
        # 1024 inputs, 10 outputs (class prediction)
        'out': tf.Variable(tf.random_normal([1024, n_classes]))
    }

    biases = {
        'bc1': tf.Variable(tf.random_normal([32])),
        'bc2': tf.Variable(tf.random_normal([64])),
        'bd1': tf.Variable(tf.random_normal([1024])),
        'out': tf.Variable(tf.random_normal([n_classes]))
    }

    # Construct model
    pred = conv_net(x, weights, biases, keep_prob)

    # Define loss and optimizer
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

    # Evaluate model
    correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

    # Initializing the variables
    init = tf.global_variables_initializer()

    # Launch the graph
    with tf.Session() as sess:
        sess.run(init)
        step = 1
        # Keep training until reach max iterations
        while step * batch_size < training_iters:
            batch_x, batch_y = mnist.train.next_batch(batch_size)
            # Run optimization op (backprop)
            sess.run(optimizer, feed_dict={x: batch_x, y: batch_y,
                                           keep_prob: dropout})
            if step % display_step == 0:
                # Calculate batch loss and accuracy
                loss, acc = sess.run([cost, accuracy], feed_dict={x: batch_x,
                                                                  y: batch_y,
                                                                  keep_prob: 1.})
                print("Iter " + str(step*batch_size) + ", Minibatch Loss= " + \
                      "{:.6f}".format(loss) + ", Training Accuracy= " + \
                      "{:.5f}".format(acc))
            step += 1
        print("Optimization Finished!")

        # Calculate accuracy for 256 mnist test images
        print("Testing Accuracy:", \
            sess.run(accuracy, feed_dict={x: mnist.test.images[:256],
                                          y: mnist.test.labels[:256],
                                          keep_prob: 1.}))
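The script above only measures accuracy on the first 256 test images so that the final feed_dict stays small. If you want accuracy over the full 10,000-image test set without feeding it all at once, one option (my own sketch, not part of the original script; it must run inside the `with tf.Session() as sess:` block after training finishes) is to evaluate in fixed-size chunks and average:

    # Sketch: full test-set accuracy, evaluated in chunks to limit memory use.
    eval_batch = 500   # divides the 10,000 test images evenly, so a plain mean is exact
    chunk_accs = []
    for start in range(0, mnist.test.num_examples, eval_batch):
        end = start + eval_batch
        chunk_accs.append(sess.run(accuracy,
                                   feed_dict={x: mnist.test.images[start:end],
                                              y: mnist.test.labels[start:end],
                                              keep_prob: 1.}))
    print("Full Testing Accuracy:", sum(chunk_accs) / len(chunk_accs))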