Implementing MNIST Digit Recognition with TensorFlow


Below is the program code for digit recognition; adjust the dataset path to match your environment:

#!/usr/bin/python
import tensorflow as tf

# Option 1: use a local copy of input_data.py, with the dataset already downloaded to this path
import input_data
mnist = input_data.read_data_sets('/home/yuan/TestMnist', one_hot=True)

# Option 2: use the reader bundled with TensorFlow, which downloads the data automatically:
# from tensorflow.examples.tutorials.mnist import input_data
# mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
  
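# Helpers: truncated-normal weight init (stddev 0.1), small positive bias init,
# stride-1 'SAME' convolution, and 2x2 max-pooling.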
def weight_variable(shape):  
  initial = tf.truncated_normal(shape, stddev=0.1)  
  return tf.Variable(initial)  
  
def bias_variable(shape):  
  initial = tf.constant(0.1, shape=shape)  
  return tf.Variable(initial)  
  
def conv2d(x, W):  
  return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')  
  
def max_pool_2x2(x):  
  return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')  
  
sess = tf.InteractiveSession()  
  
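# Placeholders: x receives flattened 28x28 images (784 values per example),
# y_ receives the one-hot labels for the 10 digit classes.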
x = tf.placeholder("float", shape=[None, 784])  
y_ = tf.placeholder("float", shape=[None, 10])  
  
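# First convolutional layer: 5x5 kernels mapping 1 input channel to 32 feature maps.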
W_conv1 = weight_variable([5, 5, 1, 32])  
b_conv1 = bias_variable([32])  
  
x_image = tf.reshape(x, [-1, 28, 28, 1])  
  
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)  
h_pool1 = max_pool_2x2(h_conv1)  
  
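# Second convolutional layer: 5x5 kernels mapping 32 feature maps to 64.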
W_conv2 = weight_variable([5, 5, 32, 64])  
b_conv2 = bias_variable([64])  
  
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)  
h_pool2 = max_pool_2x2(h_conv2)  
  
# After two rounds of 2x2 max-pooling the 28x28 image is reduced to 7x7 (28 -> 14 -> 7),
# with 64 feature maps; a fully connected layer maps this to 1024 hidden units.
W_fc1 = weight_variable([7 * 7 * 64, 1024])  
b_fc1 = bias_variable([1024])  
  
h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])  
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)  
  
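# Dropout on the fully connected layer; keep_prob is fed as 0.5 during training
# and 1.0 for evaluation.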
keep_prob = tf.placeholder("float")  
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)  
  
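# Readout layer: map the 1024 hidden units to 10 class scores and apply softmax.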
W_fc2 = weight_variable([1024, 10])  
b_fc2 = bias_variable([10])  
y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)  
  
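# Cross-entropy loss minimized with Adam (learning rate 1e-4); accuracy compares
# the predicted and true digit classes.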
cross_entropy = -tf.reduce_sum(y_*tf.log(y_conv))  
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)  
correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))  
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))  
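# Note: tf.initialize_all_variables() is the pre-1.0 name; newer TensorFlow
# versions use tf.global_variables_initializer().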
sess.run(tf.initialize_all_variables())  
  
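# Train for 20000 steps on mini-batches of 50 images, reporting training accuracy
# every 100 steps.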
for i in range(20000):  
  batch = mnist.train.next_batch(50)  
  if i%100 == 0:  
    train_accuracy = accuracy.eval(feed_dict={  
        x:batch[0], y_: batch[1], keep_prob: 1.0})  
    print "step %d, training accuracy %.3f"%(i, train_accuracy)  
  train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})  
  
print "Training finished"  
  
print "test accuracy %.3f" % accuracy.eval(feed_dict={  
    x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0})  
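
A note on the loss: computing -sum(y_ * log(y_conv)) on the explicit softmax output can become numerically unstable when a predicted probability underflows to 0. Below is a sketch of a more stable alternative, assuming the same variables as in the listing above (the keyword arguments shown are for TensorFlow 1.x; older releases take logits and labels positionally):

# Sketch only: feed raw logits into the combined softmax + cross-entropy op
# instead of taking log() of the softmax output by hand.
logits = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=logits))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

This variant averages the loss over the batch (reduce_mean) rather than summing it, so gradients scale slightly differently; accuracy can still be computed from tf.argmax(logits, 1), since softmax does not change the argmax.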

Below is part of the program's output:

Extracting /home/yuan/TestMnist/train-images-idx3-ubyte.gz
Extracting /home/yuan/TestMnist/train-labels-idx1-ubyte.gz
Extracting /home/yuan/TestMnist/t10k-images-idx3-ubyte.gz
Extracting /home/yuan/TestMnist/t10k-labels-idx1-ubyte.gz
step 0, training accuracy 0.100
step 100, training accuracy 0.800
step 200, training accuracy 0.860
step 300, training accuracy 0.800
step 400, training accuracy 0.980
step 500, training accuracy 0.880
step 600, training accuracy 1.000
step 700, training accuracy 0.960
step 800, training accuracy 0.900
step 900, training accuracy 1.000

...

step 19600, training accuracy 1.000
step 19700, training accuracy 1.000
step 19800, training accuracy 1.000
step 19900, training accuracy 1.000
Training finished
test accuracy 0.993
[Finished in 22256.5s]
