Implementing Autoencoders in TensorFlow


Introduction to autoencoders
Sparse autoencoders (Sparse Autoencoder)
Stacked autoencoders (Stacked Autoencoder)
Denoising autoencoders (Denoising Autoencoder)
Contractive autoencoders (Contractive Autoencoder)
[Figure: autoencoder architecture, Input → Encoder → Code → Decoder → Reconstruction, with the Error measured between reconstruction and input]
Input: the data fed into the network;
Encoder: the encoding network;
Code: a learned representation of the input;
Decoder: the decoding network;
Reconstruction: the rebuilt version of the input;
Error: the discrepancy between the reconstruction and the input.
An autoencoder is a neural network that tries to reproduce its input signal as faithfully as possible;
to do so, it must capture the most important factors that characterize the input data,
much like PCA finds the principal components that represent the original information.
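As a minimal numeric sketch of this pipeline (hypothetical shapes, randomly initialized and untrained weights, separate from the full TensorFlow example below), the encoder and decoder are just two affine maps with a nonlinearity in between:

import numpy as np

rng = np.random.RandomState(0)
x = rng.rand(784)                       # Input: e.g. a flattened 28x28 image
W1, b1 = rng.randn(784, 200) * 0.01, np.zeros(200)
W2, b2 = rng.randn(200, 784) * 0.01, np.zeros(784)

h = np.tanh(x @ W1 + b1)                # Encoder -> Code (200-dim representation)
x_hat = h @ W2 + b2                     # Decoder -> Reconstruction
error = 0.5 * np.sum((x_hat - x) ** 2)  # Error: squared reconstruction loss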
Common properties of the autoencoder variants
What these variants share: besides preventing the network from simply learning an identity function of X, each of them strikes a trade-off between the following two goals.
1. Learn a representation h of X from which x can be recovered by a decoder. Note that this does not have to hold for every possible X; it only needs to hold for inputs drawn from the data distribution. (Reconstruction error)
2. Reduce the representational capacity of the model, making it insensitive to as many input directions as possible. (The model's expressive power, and hence its ability to generalize)
How, then, do we strike a balance between reconstruction error and expressive power?
Solution: distinguish which variations in the training samples actually need to be represented. Learn a representation (a mapping) of the data that is sensitive to directions along the data manifold and insensitive to directions orthogonal to it; this produces a representation that contracts along the orthogonal directions. In the figure, the black curve is the manifold, the green arrow pointing right is tangent to the manifold, and the blue arrow is orthogonal to it.
[Figure: a data manifold (black curve) with a tangent direction (green arrow) and an orthogonal direction (blue arrow)]
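The contractive autoencoder listed at the top makes this contraction explicit by penalizing the Frobenius norm of the Jacobian of the code with respect to the input. Below is a minimal TF1-style sketch of that penalty, assuming a sigmoid hidden layer; the names x, W1, b1, and lam are hypothetical, and this penalty is not part of the denoising example that follows.

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 784])
W1 = tf.Variable(tf.truncated_normal([784, 200], stddev=0.01))
b1 = tf.Variable(tf.zeros([200]))
h = tf.nn.sigmoid(tf.matmul(x, W1) + b1)   # hidden code with sigmoid units

# For sigmoid units, dh_j/dx_i = h_j * (1 - h_j) * W1[i, j], so
# ||J||_F^2 = sum_j (h_j * (1 - h_j))^2 * sum_i W1[i, j]^2 (summed over the batch).
jacobian_fro = tf.reduce_sum(
    tf.square(h * (1.0 - h)) * tf.reduce_sum(tf.square(W1), axis=0))
lam = 0.1                                  # penalty weight (hyperparameter)
# total loss = reconstruction error + lam * jacobian_fro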

#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 15 13:50:06 2017
Auto-Encoder
@author: z
"""
import numpy as np
import sklearn.preprocessing as prep
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data


# Xavier initialization: sample uniformly in [-sqrt(6/(fan_in+fan_out)),
# +sqrt(6/(fan_in+fan_out))] to keep activation scales roughly constant
# across layers.
def xavier_init(fan_in, fan_out, constant=1):
    low = -constant * np.sqrt(6.0 / (fan_in + fan_out))
    high = constant * np.sqrt(6.0 / (fan_in + fan_out))
    return tf.random_uniform((fan_in, fan_out),
                             minval=low, maxval=high, dtype=tf.float32)


class AdditiveGaussianNoiseAutoencoder(object):
    def __init__(self, n_input, n_hidden,
                 transfer_function=tf.nn.softplus,
                 optimizer=tf.train.AdamOptimizer(),
                 scale=0.1):
        self.n_input = n_input
        self.n_hidden = n_hidden
        self.transfer = transfer_function
        self.scale = tf.placeholder(tf.float32)
        self.training_scale = scale
        network_weights = self._initialize_weights()
        self.weights = network_weights

        # Network structure: corrupt the input with additive Gaussian noise,
        # encode it into the hidden layer, then decode back to input space.
        self.x = tf.placeholder(tf.float32, [None, self.n_input])
        self.hidden = self.transfer(tf.add(tf.matmul(
            self.x + self.scale * tf.random_normal((n_input,)),
            self.weights['w1']), self.weights['b1']))
        self.reconstruction = tf.add(tf.matmul(
            self.hidden, self.weights['w2']), self.weights['b2'])

        # Loss: squared reconstruction error.
        self.cost = 0.5 * tf.reduce_sum(tf.pow(
            tf.subtract(self.reconstruction, self.x), 2))
        self.optimizer = optimizer.minimize(self.cost)

        init = tf.global_variables_initializer()
        self.sess = tf.Session()
        self.sess.run(init)
        print('begin to run session...')

    def _initialize_weights(self):
        all_weights = dict()
        all_weights['w1'] = tf.Variable(xavier_init(
            self.n_input, self.n_hidden))
        all_weights['b1'] = tf.Variable(tf.zeros(
            [self.n_hidden], dtype=tf.float32))
        all_weights['w2'] = tf.Variable(tf.zeros(
            [self.n_hidden, self.n_input], dtype=tf.float32))
        all_weights['b2'] = tf.Variable(tf.zeros(
            [self.n_input], dtype=tf.float32))
        return all_weights

    def partial_fit(self, X):
        # One training step on a mini-batch; returns the batch cost.
        cost, opt = self.sess.run(
            (self.cost, self.optimizer),
            feed_dict={self.x: X, self.scale: self.training_scale})
        return cost

    def calc_total_cost(self, X):
        return self.sess.run(
            self.cost,
            feed_dict={self.x: X, self.scale: self.training_scale})

    def transform(self, X):
        # Encode X into the hidden representation.
        return self.sess.run(
            self.hidden,
            feed_dict={self.x: X, self.scale: self.training_scale})

    def generate(self, hidden=None):
        # Decode a hidden code back to input space.
        if hidden is None:
            hidden = np.random.normal(size=(1, self.n_hidden))
        return self.sess.run(self.reconstruction,
                             feed_dict={self.hidden: hidden})

    def reconstruct(self, X):
        # Full pass: encode then decode.
        return self.sess.run(
            self.reconstruction,
            feed_dict={self.x: X, self.scale: self.training_scale})

    def getWeights(self):
        return self.sess.run(self.weights['w1'])

    def getBiases(self):
        return self.sess.run(self.weights['b1'])


mnist = input_data.read_data_sets('../MNIST_data', one_hot=True)


def standard_scale(X_train, X_test):
    # Fit the scaler on the training set only, then apply it to both sets.
    preprocessor = prep.StandardScaler().fit(X_train)
    X_train = preprocessor.transform(X_train)
    X_test = preprocessor.transform(X_test)
    return X_train, X_test


def get_random_block_from_data(data, batch_size):
    # Sample a random contiguous mini-batch (sampling with replacement).
    start_index = np.random.randint(0, len(data) - batch_size)
    return data[start_index:(start_index + batch_size)]


X_train, X_test = standard_scale(mnist.train.images, mnist.test.images)
n_samples = int(mnist.train.num_examples)
training_epochs = 20
batch_size = 128
display_step = 1

autoencoder = AdditiveGaussianNoiseAutoencoder(
    n_input=784,
    n_hidden=200,
    transfer_function=tf.nn.softplus,
    optimizer=tf.train.AdamOptimizer(learning_rate=0.0001),
    scale=0.01)

for epoch in range(training_epochs):
    avg_cost = 0
    total_batch = int(n_samples / batch_size)
    for i in range(total_batch):
        batch_xs = get_random_block_from_data(X_train, batch_size)
        cost = autoencoder.partial_fit(batch_xs)
        avg_cost += cost / n_samples * batch_size  # accumulate the epoch average
    if epoch % display_step == 0:
        print("epoch : %04d, cost = %.9f" % (epoch + 1, avg_cost))

print("Total cost : ", str(autoencoder.calc_total_cost(X_test)))
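Once training finishes, the class's helper methods can be used to inspect the learned model; a small hypothetical follow-up using the variables defined above:

codes = autoencoder.transform(X_test[:10])     # 10 test images -> 200-dim codes
recons = autoencoder.reconstruct(X_test[:10])  # encode then decode the same images
print(codes.shape, recons.shape)               # (10, 200) (10, 784)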