Implementing a CNN in TensorFlow


Here we use TensorFlow to implement a CNN for MNIST digit recognition. I won't go over how CNNs work again; let's go straight to the code.

#Use a CNN to classify MNIST handwritten digits
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

#MNIST: the digits 0-9, with one-hot encoded labels
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)

#Compute classification accuracy on a held-out set
def compute_accuracy(v_xs, v_ys):
    #prediction is defined below as a global
    global prediction
    y_pre = sess.run(prediction, feed_dict={xs: v_xs, keep_prob: 1})
    correct_prediction = tf.equal(tf.argmax(y_pre, 1), tf.argmax(v_ys, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    result = sess.run(accuracy, feed_dict={xs: v_xs, ys: v_ys, keep_prob: 1})
    return result

#Create a weight variable, initialized with small random values
def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

#Create a bias variable, initialized to a small constant
def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)

def conv2d(x, W):
    #The first argument is the input feature map (the image), the second
    #is the convolution kernel; strides gives the step size per dimension
    #(1 in both x and y here), and SAME padding keeps the spatial size.
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

def max_pool_2x2(x):
    #Pooling further reduces the number of parameters downstream.
    #ksize is the filter size, strides the step size; SAME padding
    #zero-pads as needed.
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

#Placeholders
xs = tf.placeholder(tf.float32, [None, 784])  #28x28 pixels, flattened
ys = tf.placeholder(tf.float32, [None, 10])
#keep_prob is the probability of keeping a unit when applying dropout
keep_prob = tf.placeholder(tf.float32)
x_image = tf.reshape(xs, [-1, 28, 28, 1])
#print(x_image.shape)

#First convolutional layer
W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
#Convolve, then apply the ReLU non-linearity
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)  #output 28x28x32
h_pool1 = max_pool_2x2(h_conv1)  #output 14x14x32

#Second convolutional layer
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)  #output 14x14x64
h_pool2 = max_pool_2x2(h_conv2)  #output 7x7x64

#First fully connected layer, 1024 hidden units
W_fc1 = weight_variable([7*7*64, 1024])
b_fc1 = bias_variable([1024])
#Flatten the pooled feature maps into one vector per example
h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
#Apply dropout
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

#Second fully connected layer
W_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])
#Softmax turns the scores into class probabilities
#(this must use h_fc1_drop, not h_fc1, or dropout is bypassed)
prediction = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)

#Cross-entropy loss
cross_entropy = tf.reduce_mean(-tf.reduce_sum(ys * tf.log(prediction),
                                              reduction_indices=[1]))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

sess = tf.Session()
sess.run(tf.global_variables_initializer())
for i in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    #Train with keep_prob = 0.5 so dropout actually fires;
    #feeding 1 here would disable it
    sess.run(train_step, feed_dict={xs: batch_xs, ys: batch_ys, keep_prob: 0.5})
    if i % 50 == 0:
        print(compute_accuracy(mnist.test.images, mnist.test.labels))
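As a sanity check on the dimension comments above: with SAME padding and stride 1, each convolution keeps the 28x28 spatial size, and each 2x2 max pool with stride 2 halves it, so the feature maps go 28x28 -> 14x14 -> 7x7, which is where the 7*7*64 flatten size comes from. A minimal shape-check sketch, assuming the same TensorFlow 1.x API as the code above:

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 28, 28, 1])
w1 = tf.Variable(tf.truncated_normal([5, 5, 1, 32], stddev=0.1))
#SAME padding, stride 1: spatial size unchanged
h1 = tf.nn.conv2d(x, w1, strides=[1, 1, 1, 1], padding='SAME')
#2x2 pool, stride 2: spatial size halved
p1 = tf.nn.max_pool(h1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
print(h1.get_shape())  #(?, 28, 28, 32)
print(p1.get_shape())  #(?, 14, 14, 32)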
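One caveat about the loss: applying tf.nn.softmax and then tf.log by hand can produce NaN when a predicted probability underflows to zero. TensorFlow's fused op tf.nn.softmax_cross_entropy_with_logits is the numerically stable alternative; below is a sketch of how the loss lines above could be swapped out (prediction is kept so compute_accuracy still works):

#Sketch: numerically stable cross-entropy via TensorFlow's fused op
logits = tf.matmul(h_fc1_drop, W_fc2) + b_fc2   #pre-softmax scores
prediction = tf.nn.softmax(logits)              #still used by compute_accuracy
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=ys, logits=logits))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)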