tensorflow31 Notes on 《TensorFlow实战》 (TensorFlow in Action) - 04: Implementing an Autoencoder and a Multi-Layer Perceptron in TensorFlow (code)


01 Autoencoder

# 《TensorFlow实战》 (TensorFlow in Action) 04: Implementing an autoencoder and a multi-layer perceptron in TensorFlow
# win10 TensorFlow 1.0.1 python 3.5.3
# CUDA v8.0 cudnn-8.0-windows10-x64-v5.1
# filename: sz04.01.py  # autoencoder
# https://github.com/tensorflow/models/blob/master/autoencoder/AdditiveGaussianNoiseAutoencoderRunner.py
'''
git clone https://github.com/tensorflow/models.git tensorflow_models
# code location:
tensorflow_models\autoencoder\AdditiveGaussianNoiseAutoencoderRunner.py
# depends on:
tensorflow_models\autoencoder\autoencoder_models
'''
import numpy as np
import sklearn.preprocessing as prep
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from autoencoder_models.DenoisingAutoencoder import AdditiveGaussianNoiseAutoencoder

mnist = input_data.read_data_sets('MNIST_data', one_hot=True)

def standard_scale(X_train, X_test):
    # Fit the scaler on the training set only, then apply the same
    # transform to both sets so they share one mean/variance.
    preprocessor = prep.StandardScaler().fit(X_train)
    X_train = preprocessor.transform(X_train)
    X_test = preprocessor.transform(X_test)
    return X_train, X_test

def get_random_block_from_data(data, batch_size):
    # Draw a random contiguous block as a minibatch; across calls this
    # amounts to sampling with replacement.
    start_index = np.random.randint(0, len(data) - batch_size)
    return data[start_index:(start_index + batch_size)]

X_train, X_test = standard_scale(mnist.train.images, mnist.test.images)
n_samples = int(mnist.train.num_examples)
training_epochs = 20
batch_size = 128
display_step = 1

autoencoder = AdditiveGaussianNoiseAutoencoder(n_input=784,
                                               n_hidden=200,
                                               transfer_function=tf.nn.softplus,
                                               optimizer=tf.train.AdamOptimizer(learning_rate=0.001),
                                               scale=0.01)

for epoch in range(training_epochs):
    avg_cost = 0.
    total_batch = int(n_samples / batch_size)
    # Loop over all batches
    for i in range(total_batch):
        batch_xs = get_random_block_from_data(X_train, batch_size)
        # Fit training using batch data
        cost = autoencoder.partial_fit(batch_xs)
        # Compute average loss
        avg_cost += cost / n_samples * batch_size
    # Display logs per epoch step
    if epoch % display_step == 0:
        print("Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(avg_cost))

print("Total cost: " + str(autoencoder.calc_total_cost(X_test)))
'''
Epoch: 0001 cost= 20192.045613636
Epoch: 0002 cost= 12669.102565909
...
Epoch: 0018 cost= 7653.716919318
Epoch: 0019 cost= 8264.436097727
Epoch: 0020 cost= 9477.928647727
Total cost: 638381.0
'''
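The script imports AdditiveGaussianNoiseAutoencoder from the cloned models repo rather than defining it. For reference, here is a minimal sketch of what that class does, condensed from memory of the tensorflow/models version the book follows; exact details (e.g. the repo initializes w1 with Xavier initialization and routes the noise scale slightly differently) may differ from the real file, so treat this as an illustration, not the repo's code.

import tensorflow as tf

class AdditiveGaussianNoiseAutoencoder(object):
    # Sketch of autoencoder_models/DenoisingAutoencoder.py: corrupt the
    # input with additive Gaussian noise, encode with one hidden layer,
    # decode linearly, and minimize squared reconstruction error
    # against the *clean* input.
    def __init__(self, n_input, n_hidden, transfer_function=tf.nn.softplus,
                 optimizer=tf.train.AdamOptimizer(), scale=0.1):
        self.training_scale = scale
        self.scale = tf.placeholder(tf.float32)  # noise level, fed at run time
        self.x = tf.placeholder(tf.float32, [None, n_input])
        w1 = tf.Variable(tf.truncated_normal([n_input, n_hidden], stddev=0.1))  # repo uses Xavier init
        b1 = tf.Variable(tf.zeros([n_hidden]))
        w2 = tf.Variable(tf.zeros([n_hidden, n_input]))
        b2 = tf.Variable(tf.zeros([n_input]))
        # Encoder: noise-corrupted input -> softplus hidden code
        self.hidden = transfer_function(
            tf.matmul(self.x + self.scale * tf.random_normal((n_input,)), w1) + b1)
        # Decoder: linear reconstruction
        self.reconstruction = tf.matmul(self.hidden, w2) + b2
        # Squared-error cost against the uncorrupted input
        self.cost = 0.5 * tf.reduce_sum(tf.pow(self.reconstruction - self.x, 2.0))
        self.optimizer = optimizer.minimize(self.cost)
        self.sess = tf.Session()
        self.sess.run(tf.global_variables_initializer())

    def partial_fit(self, X):
        # One training step on a minibatch; returns the batch cost.
        cost, _ = self.sess.run((self.cost, self.optimizer),
                                feed_dict={self.x: X, self.scale: self.training_scale})
        return cost

    def calc_total_cost(self, X):
        # Cost on a full dataset, with no training step.
        return self.sess.run(self.cost,
                             feed_dict={self.x: X, self.scale: self.training_scale})

The key design point is that the noise is added only on the encoder path while the cost compares the reconstruction to the original x, which is what makes it a denoising autoencoder rather than a plain one.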

02 Multi-Layer Perceptron (MLP)

# 《TensorFlow实战》 (TensorFlow in Action) 04: Implementing an autoencoder and a multi-layer perceptron in TensorFlow
# win10 TensorFlow 1.0.1 python 3.5.3
# CUDA v8.0 cudnn-8.0-windows10-x64-v5.1
# filename: sz04.02.py  # multi-layer perceptron
# Multi-layer perceptron (MLP) = multi-layer neural network = fully connected network (FCN)
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

in_units = 784
h1_units = 300
out_units = 10
base_learning_rate = 0.3

# The hidden layer gets a small truncated-normal init to break symmetry
# for ReLU units; the output layer can safely start at zero.
W1 = tf.Variable(tf.truncated_normal([in_units, h1_units], stddev=0.1))
b1 = tf.Variable(tf.zeros([h1_units]))
W2 = tf.Variable(tf.zeros([h1_units, out_units]))
b2 = tf.Variable(tf.zeros([out_units]))

x = tf.placeholder(tf.float32, [None, in_units])
keep_prob = tf.placeholder(tf.float32)  # dropout keep probability

hidden1 = tf.nn.relu(tf.matmul(x, W1) + b1)
hidden1_drop = tf.nn.dropout(hidden1, keep_prob)
y = tf.nn.softmax(tf.matmul(hidden1_drop, W2) + b2)

y_ = tf.placeholder(tf.float32, [None, out_units])
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
train_step = tf.train.AdagradOptimizer(base_learning_rate).minimize(cross_entropy)

# Build the evaluation ops once, outside the training loop, instead of
# re-creating them on every iteration.
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

sess = tf.InteractiveSession()
tf.global_variables_initializer().run()
for i in range(3000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    # Train with dropout (keep 75% of activations); evaluate with keep_prob = 1.0.
    train_step.run({x: batch_xs, y_: batch_ys, keep_prob: 0.75})
    if i % 600 == 0:
        print(accuracy.eval({x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))
'''
0.2336
0.9648
0.9718
0.9776
0.9772
'''
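One caveat with the loss above: taking tf.log(y) on an explicit softmax output can hit log(0) once a probability saturates. This is not what the book's code does, but a common, more stable alternative in TensorFlow 1.x is the fused op tf.nn.softmax_cross_entropy_with_logits; the sketch below introduces a new name, logits, while hidden1_drop, W2, b2, and y_ are assumed to be defined as in sz04.02.py above.

logits = tf.matmul(hidden1_drop, W2) + b2  # raw, pre-softmax scores
y = tf.nn.softmax(logits)                  # still available for predictions
# Fused softmax + cross-entropy, computed in one numerically stable kernel
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=logits))

Because the fused op works on the raw logits, the loss never evaluates a hard zero, and the rest of the script (optimizer, training loop, evaluation) is unchanged.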