Implementing an Autoencoder (AutoEncoder) in TensorFlow

# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import sklearn.preprocessing as prep
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# Xavier initialization: a uniform distribution over
# [-sqrt(6 / (fan_in + fan_out)), sqrt(6 / (fan_in + fan_out))],
# which keeps the variance of activations roughly constant across layers.
def xavier_init(fan_in, fan_out, constant=1):
    low = -constant * np.sqrt(6.0 / (fan_in + fan_out))
    high = constant * np.sqrt(6.0 / (fan_in + fan_out))
    return tf.random_uniform((fan_in, fan_out),
                             minval=low, maxval=high,
                             dtype=tf.float32)

# A denoising autoencoder with additive Gaussian noise.
# (self below is simply the instance being constructed; Python passes it
# to every method automatically.)
class AdditiveGaussianNoiseAutoencoder(object):
    def __init__(self, n_input, n_hidden, transfer_function=tf.nn.softplus,
                 optimizer=tf.train.AdamOptimizer(), scale=0.1):
        self.n_input = n_input
        self.n_hidden = n_hidden
        self.transfer = transfer_function  # hidden-layer activation, softplus by default
        self.scale = tf.placeholder(tf.float32)
        self.training_scale = scale
        network_weights = self._initialize_weights()
        self.weights = network_weights

        # Model: first a placeholder for the input x with dimension n_input,
        # then a hidden layer that extracts features from it.
        self.x = tf.placeholder(tf.float32, [None, self.n_input])
        # Add random Gaussian noise to the input (tf.random_normal), multiply
        # the noisy input by the hidden-layer weights (tf.matmul), add the
        # bias b1, and apply the activation. Note: self.scale (the placeholder)
        # is used here so the value fed at run time actually takes effect; the
        # original listing baked in the Python constant scale instead.
        self.hidden = self.transfer(tf.add(tf.matmul(
            self.x + self.scale * tf.random_normal((n_input,)),
            self.weights['w1']), self.weights['b1']))
        # Reconstruction layer: rebuild the original data from the hidden code.
        self.reconstruction = tf.add(tf.matmul(self.hidden, self.weights['w2']),
                                     self.weights['b2'])

        # Cost: squared reconstruction error. The factor 0.5 is there only so
        # that the gradient of the squared term carries no stray factor of 2.
        self.cost = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(self.reconstruction, self.x), 2.0))
        # Optimizer minimizing self.cost.
        self.optimizer = optimizer.minimize(self.cost)

        # Create a Session and initialize all model parameters.
        # tf.global_variables_initializer() adds a node that initializes every
        # variable; run it after the whole graph has been built.
        init = tf.global_variables_initializer()
        self.sess = tf.Session()
        self.sess.run(init)

    # Parameter initialization.
    def _initialize_weights(self):
        # Collect all parameters in a dict named all_weights.
        all_weights = dict()
        # w1 uses xavier_init above: pass in the input and hidden node counts
        # and it returns an initial weight distribution well suited to the
        # softplus activation.
        all_weights['w1'] = tf.Variable(xavier_init(self.n_input, self.n_hidden))
        # The bias b1 can simply be zero-initialized with tf.zeros.
        all_weights['b1'] = tf.Variable(tf.zeros([self.n_hidden], dtype=tf.float32))
        # The output layer self.reconstruction has no activation, so zeros
        # suffice for w2 and b2 as well.
        all_weights['w2'] = tf.Variable(tf.zeros([self.n_hidden, self.n_input], dtype=tf.float32))
        all_weights['b2'] = tf.Variable(tf.zeros([self.n_input], dtype=tf.float32))
        return all_weights

    # partial_fit computes the cost and runs one training step:
    # it trains on a single batch and returns the current loss.
    def partial_fit(self, X):
        # One Session call evaluates two graph nodes: the loss (self.cost)
        # and the training op (self.optimizer).
        cost, opt = self.sess.run((self.cost, self.optimizer),
                                  feed_dict={self.x: X, self.scale: self.training_scale})
        return cost

    # calc_total_cost only computes the cost; unlike partial_fit it triggers
    # no training. Used to evaluate the model on the test set.
    def calc_total_cost(self, X):
        return self.sess.run(self.cost,
                             feed_dict={self.x: X, self.scale: self.training_scale})

    # transform returns the hidden-layer output: the abstract, higher-order
    # features the autoencoder has learned from the data.
    def transform(self, X):
        return self.sess.run(self.hidden,
                             feed_dict={self.x: X, self.scale: self.training_scale})

    # generate maps higher-order features back to the original data space.
    def generate(self, hidden=None):
        if hidden is None:
            # Sample a random hidden code of the right shape. (The original
            # passed the b1 variable itself as size, which does not run.)
            hidden = np.random.normal(size=(1, self.n_hidden))
        return self.sess.run(self.reconstruction, feed_dict={self.hidden: hidden})

    # reconstruct runs the whole round trip, extracting the higher-order
    # features and rebuilding the data from them: transform plus generate.
    def reconstruct(self, X):
        return self.sess.run(self.reconstruction,
                             feed_dict={self.x: X, self.scale: self.training_scale})

    # Hidden-layer weights w1.
    def getWeights(self):
        return self.sess.run(self.weights['w1'])

    # Hidden-layer bias b1.
    def getBiases(self):
        return self.sess.run(self.weights['b1'])
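Before running the full MNIST experiment below, the class can be smoke-tested in isolation. The following is a minimal sketch, not part of the original listing: the toy sizes (64 inputs, 16 hidden units) and the random inputs are made-up stand-ins, but it exercises the same partial_fit path the real training loop uses.

# Hypothetical smoke test: random data in place of real inputs.
toy = AdditiveGaussianNoiseAutoencoder(n_input=64, n_hidden=16, scale=0.01)
X_toy = np.random.rand(32, 64).astype(np.float32)  # 32 fake 64-dimensional samples
for step in range(5):
    # The returned cost should trend downward over the steps.
    print('step', step, 'cost', toy.partial_fit(X_toy))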
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)

# Standardize the training and test data.
def standard_scale(X_train, X_test):
    # Fit the scaler on the training set only, then apply the same
    # transformation to both sets.
    preprocessor = prep.StandardScaler().fit(X_train)
    X_train = preprocessor.transform(X_train)
    X_test = preprocessor.transform(X_test)
    return X_train, X_test

# Fetch a random block of data: draw a random integer between 0 and
# len(data) - batch_size, then take batch_size consecutive samples from there.
def get_random_block_from_data(data, batch_size):
    start_index = np.random.randint(0, len(data) - batch_size)
    return data[start_index:(start_index + batch_size)]

# Standardize the training and test sets with standard_scale defined above.
X_train, X_test = standard_scale(mnist.train.images, mnist.test.images)
n_samples = int(mnist.train.num_examples)  # total number of training samples
training_epochs = 20                       # maximum number of training epochs
batch_size = 128
display_step = 1                           # print the cost once per epoch

# The framework above is complete; now create an AGN autoencoder instance.
autoencoder = AdditiveGaussianNoiseAutoencoder(n_input=784,   # input nodes
                                               n_hidden=200,  # hidden nodes
                                               transfer_function=tf.nn.softplus,
                                               optimizer=tf.train.AdamOptimizer(learning_rate=0.001),
                                               scale=0.01)    # noise coefficient

for epoch in range(training_epochs):
    avg_cost = 0.  # reset the average loss at the start of each epoch
    total_batch = int(n_samples / batch_size)  # number of batches per epoch
    # Loop over all batches (total_batch iterations).
    for i in range(total_batch):
        batch_xs = get_random_block_from_data(X_train, batch_size)
        # Fit training using batch data.
        cost = autoencoder.partial_fit(batch_xs)
        # Compute average loss (divide first, then multiply).
        avg_cost += cost / n_samples * batch_size
    # Display logs once per display_step epochs.
    if epoch % display_step == 0:
        print("Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(avg_cost))

# Compute the error on the test set.
print("Total cost: " + str(autoencoder.calc_total_cost(X_test)))
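The script trains the model and reports the total test cost, but never exercises transform, generate, or reconstruct. As a rough sketch of how those methods could be used after training (reusing the autoencoder and X_test variables from above; the sample size of 5 is arbitrary):

# Illustrative only: inspect the trained model on a few test images.
sample = X_test[:5]
codes = autoencoder.transform(sample)      # (5, 200) hidden-layer features
recons = autoencoder.reconstruct(sample)   # (5, 784) round-trip reconstructions
print('hidden code shape:', codes.shape)
print('per-image squared error:', np.mean((recons - sample) ** 2, axis=1))
# generate() decodes an arbitrary hidden code back to input space:
fake = autoencoder.generate(np.random.normal(size=(1, 200)))
print('generated sample shape:', fake.shape)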

