《TensorFlow实战》 (TensorFlow in Action) Notes - 05: Implementing Convolutional Neural Networks with TensorFlow


01 Simple Convolutional Network

# 《TensorFlow实战》05: Implementing convolutional neural networks with TensorFlow
# win10 Tensorflow1.0.1 python3.5.3
# CUDA v8.0 cudnn-8.0-windows10-x64-v5.1
# filename:sz05.01.py
# Simple convolutional network
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf

mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
sess = tf.InteractiveSession()

# Initialize weights with truncated-normal noise to break symmetry
def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

# Small positive bias to avoid dead ReLU units
def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)

# 2D convolution, stride 1, SAME padding (spatial size preserved)
def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding="SAME")

# 2x2 max pooling, halving the spatial dimensions
def max_pool_2x2(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")

x = tf.placeholder(tf.float32, [None, 784])
y_ = tf.placeholder(tf.float32, [None, 10])
x_image = tf.reshape(x, [-1, 28, 28, 1])

# First conv layer: 5x5 kernels, 1 input channel, 32 feature maps
W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)

# Second conv layer: 5x5 kernels, 32 input channels, 64 feature maps
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)

# Fully connected layer on the flattened 7x7x64 feature maps
W_fc1 = weight_variable([7 * 7 * 64, 1024])
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)

# Dropout: keep_prob < 1 during training, 1.0 at evaluation
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

# Output layer: softmax over 10 classes
W_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])
y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)

cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y_conv), reduction_indices=[1]))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

tf.global_variables_initializer().run()
for i in range(20000):
    batch = mnist.train.next_batch(50)
    if i % 1000 == 0:
        train_accuracy = accuracy.eval(feed_dict={x: batch[0], y_: batch[1], keep_prob: 1.0})
        print("step %d, training accuracy %g" % (i, train_accuracy))
    train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})

print("test accuracy %g" % accuracy.eval(feed_dict={x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))
'''
step 0, training accuracy 0.04
step 1000, training accuracy 0.96
step 2000, training accuracy 0.92
...
step 16000, training accuracy 0.98
step 17000, training accuracy 1
step 18000, training accuracy 1
step 19000, training accuracy 1
test accuracy 0.9918
'''
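
A note on the shapes: both pooling layers use SAME padding with stride 2, so the 28x28 input becomes 14x14 after pool1 and 7x7 after pool2, which is where the 7*7*64 = 3136 input width of the first fully connected layer comes from. Also, computing the loss as -sum(y_ * log(softmax(...))) by hand can underflow when a predicted probability reaches 0. Below is a minimal sketch (not the book's code) of the numerically stable alternative in TF 1.x, which fuses softmax and log into one op; the *_demo placeholder names are illustrative only:

import tensorflow as tf

labels_demo = tf.placeholder(tf.float32, [None, 10])  # one-hot ground truth (stand-in for y_)
logits_demo = tf.placeholder(tf.float32, [None, 10])  # pre-softmax scores, e.g. tf.matmul(h_fc1_drop, W_fc2) + b_fc2
# Numerically stable: operates on raw logits instead of log(softmax(...))
stable_cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=labels_demo, logits=logits_demo))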

02 CIFAR-10 Convolutional Network

# 《TensorFlow实战》05: Implementing convolutional neural networks with TensorFlow
# win10 Tensorflow1.0.1 python3.5.3
# CUDA v8.0 cudnn-8.0-windows10-x64-v5.1
# filename:sz05.02.py
# CIFAR-10 convolutional network
# cifar10 and cifar10_input can be obtained from tensorflow_models\tutorials\image\cifar10
import cifar10, cifar10_input
import tensorflow as tf
import numpy as np
import math
import time

max_steps = 3000
batch_size = 128
# http://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz
# Extract cifar-10-binary.tar.gz into cifar10_data/cifar10-10-batches-bin
data_dir = "cifar10_data/cifar10-10-batches-bin"

# Create a weight variable; if wl is given, attach an L2 weight-decay
# term (wl * l2_loss) to the 'losses' collection for the total loss
def variable_with_weight_loss(shape, stddev, wl):
    var = tf.Variable(tf.truncated_normal(shape, stddev=stddev))
    if wl is not None:
        weight_loss = tf.multiply(tf.nn.l2_loss(var), wl, name="weight_loss")
        tf.add_to_collection('losses', weight_loss)
    return var

cifar10.maybe_download_and_extract()
# Training inputs are augmented (distorted); test inputs are only cropped/whitened
images_train, labels_train = cifar10_input.distorted_inputs(data_dir=data_dir, batch_size=batch_size)
images_test, labels_test = cifar10_input.inputs(eval_data=True, data_dir=data_dir, batch_size=batch_size)

image_holder = tf.placeholder(tf.float32, [batch_size, 24, 24, 3])
label_holder = tf.placeholder(tf.int32, [batch_size])

# Conv layer 1: conv -> ReLU -> max pool -> LRN
weight1 = variable_with_weight_loss(shape=[5, 5, 3, 64], stddev=5e-2, wl=0.0)
kernel1 = tf.nn.conv2d(image_holder, weight1, [1, 1, 1, 1], padding='SAME')
bias1 = tf.Variable(tf.constant(0.0, shape=[64]))
conv1 = tf.nn.relu(tf.nn.bias_add(kernel1, bias1))
pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME')
norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)

# Conv layer 2: conv -> ReLU -> LRN -> max pool (note the reversed pool/LRN order)
weight2 = variable_with_weight_loss(shape=[5, 5, 64, 64], stddev=5e-2, wl=0.0)
kernel2 = tf.nn.conv2d(norm1, weight2, [1, 1, 1, 1], padding='SAME')
bias2 = tf.Variable(tf.constant(0.1, shape=[64]))
conv2 = tf.nn.relu(tf.nn.bias_add(kernel2, bias2))
norm2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)
pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME')

# Fully connected layers; wl=0.004 adds L2 weight decay on these weights
reshape = tf.reshape(pool2, [batch_size, -1])
dim = reshape.get_shape()[1].value
weight3 = variable_with_weight_loss(shape=[dim, 384], stddev=0.04, wl=0.004)
bias3 = tf.Variable(tf.constant(0.1, shape=[384]))
local3 = tf.nn.relu(tf.matmul(reshape, weight3) + bias3)

weight4 = variable_with_weight_loss(shape=[384, 192], stddev=0.04, wl=0.004)
bias4 = tf.Variable(tf.constant(0.1, shape=[192]))
local4 = tf.nn.relu(tf.matmul(local3, weight4) + bias4)

weight5 = variable_with_weight_loss(shape=[192, 10], stddev=1 / 192.0, wl=0.0)
bias5 = tf.Variable(tf.constant(0.0, shape=[10]))
logits = tf.add(tf.matmul(local4, weight5), bias5)

# Total loss = mean cross-entropy + all weight-decay terms in 'losses'
def loss(logits, labels):
    labels = tf.cast(labels, tf.int64)
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=logits, labels=labels, name='cross_entropy_per_example')
    cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
    tf.add_to_collection('losses', cross_entropy_mean)
    return tf.add_n(tf.get_collection('losses'), name='total_loss')

loss = loss(logits, label_holder)
train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)
# 1 where the top-1 prediction matches the label, 0 otherwise
top_k_op = tf.nn.in_top_k(logits, label_holder, 1)

sess = tf.InteractiveSession()
tf.global_variables_initializer().run()
# Start the input-pipeline threads that fill the data queues
tf.train.start_queue_runners()

for step in range(max_steps):
    start_time = time.time()
    image_batch, label_batch = sess.run([images_train, labels_train])
    _, loss_value = sess.run([train_op, loss], feed_dict={image_holder: image_batch, label_holder: label_batch})
    duration = time.time() - start_time
    if step % 10 == 0:
        examples_per_sec = batch_size / duration
        sec_per_batch = float(duration)
        format_str = ('step %d, loss = %.2f (%.1f examples/sec; %.3f sec/batch)')
        print(format_str % (step, loss_value, examples_per_sec, sec_per_batch))

# Evaluate top-1 precision on the test set, batch by batch
num_examples = 10000
num_iter = int(math.ceil(num_examples / batch_size))
true_count = 0
total_sample_count = num_iter * batch_size
step = 0
while step < num_iter:
    image_batch, label_batch = sess.run([images_test, labels_test])
    predictions = sess.run([top_k_op], feed_dict={image_holder: image_batch, label_holder: label_batch})
    true_count += np.sum(predictions)
    step += 1

precision = true_count / total_sample_count
print('precision @ 1 = %.3f' % precision)
'''
step 0, loss = 4.67 (6.0 examples/sec; 21.268 sec/batch)
step 10, loss = 3.65 (773.7 examples/sec; 0.165 sec/batch)
...
step 2970, loss = 0.95 (877.4 examples/sec; 0.146 sec/batch)
step 2980, loss = 1.12 (862.6 examples/sec; 0.148 sec/batch)
step 2990, loss = 1.06 (967.1 examples/sec; 0.132 sec/batch)
precision @ 1 = 0.705
'''
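
The weight-decay plumbing above is easy to miss: variable_with_weight_loss stores wl * tf.nn.l2_loss(var) in a graph collection named 'losses' (tf.nn.l2_loss computes sum(var**2) / 2), loss() appends the mean cross-entropy to the same collection, and tf.add_n sums every entry into the total objective. Only the two fully connected layers pass wl=0.004, so only they are L2-regularized. A self-contained sketch of that mechanism, with made-up numbers for illustration:

import tensorflow as tf

w = tf.constant([[3.0, 4.0]])                           # toy "weights"
l2 = tf.nn.l2_loss(w)                                   # (3**2 + 4**2) / 2 = 12.5
tf.add_to_collection('losses', tf.multiply(l2, 0.004))  # weight-decay term = 0.05
tf.add_to_collection('losses', tf.constant(1.0))        # stand-in for the mean cross-entropy
total_loss = tf.add_n(tf.get_collection('losses'))      # 0.05 + 1.0
with tf.Session() as demo_sess:
    print(demo_sess.run(total_loss))                    # 1.05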