theano-xnor-net code annotations 7: inf_layers.py
import theano
import theano.tensor as T
import numpy as np
import lasagne
from fxp_helper import to_fixed_point_theano


# Classic batch normalization layer with fixed point simulation feature for the output data
class BatchNormLayer(lasagne.layers.BatchNormLayer):
    """Class to override the lasagne batch norm layer. This is only implemented
    for inference. It clubs the normalization parameters together and
    pre-multiplies them.
    """
    def __init__(self, incoming, format='float', data_bits=15, int_bits=0, **kwargs):
        super(BatchNormLayer, self).__init__(incoming, **kwargs)
        # club mean, gamma and inv_std into two parameters
        sub = -self.mean.get_value() * self.inv_std.get_value() * self.gamma.get_value() + self.beta.get_value()
        scale = self.gamma.get_value() * self.inv_std.get_value()
        # The two params below are just combinations of the layer params learned during
        # training. Precomputing these data-independent products keeps them aside for
        # inference. Also, the range of the clubbed params is smaller than that of the
        # isolated params, which reduces the number of integer bits needed to represent
        # them in the fixed point format.
        # add_param() registers these arrays as parameters of this layer, so that
        # lasagne.layers.get_all_params() can later retrieve everything registered here.
        self.sub = self.add_param(sub, sub.shape, name='sub')
        self.scale = self.add_param(scale, scale.shape, name='scale')
        self.format = format
        # FIXME: do the below variables need to be theano shared variables?
        self.data_bits = theano.shared(data_bits)
        self.int_bits = theano.shared(int_bits)

    def get_output_for(self, input, deterministic=False, **kwargs):
        """Override the lasagne implementation, for inference time only."""
        # This method is only used at test time; training still uses the stock
        # lasagne implementation.
        assert(deterministic), 'This layer is only implemented for inference. Use direct Lasagne implementation during training'
        # prepare dimshuffle pattern inserting broadcastable axes as needed
        param_axes = iter(range(input.ndim - len(self.axes)))
        pattern = ['x' if input_axis in self.axes else next(param_axes)
                   for input_axis in range(input.ndim)]
        scale = self.scale.dimshuffle(pattern)
        sub = self.sub.dimshuffle(pattern)
        if(self.format == 'fixed'):
            # assuming the parameters are already simulated for their respective
            # fixed point formats
            full_precision_out = input * scale + sub
            return to_fixed_point_theano(full_precision_out, self.data_bits, self.int_bits)
        else:
            return input * scale + sub
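The parameter clubbing above is worth spelling out. Inference-time batch normalization computes gamma * (x - mean) * inv_std + beta; factoring out the data-independent parts gives x * (gamma * inv_std) + (beta - mean * inv_std * gamma), i.e. x * scale + sub with exactly the scale and sub computed in __init__. A minimal numpy check of that identity, with made-up values (the names mirror the layer attributes above):

import numpy as np

# toy batch-norm statistics and learned affine parameters
mean = np.array([0.5, -1.0])
inv_std = np.array([2.0, 0.5])   # 1 / sqrt(var + eps), as stored by lasagne
gamma = np.array([1.5, 0.8])
beta = np.array([0.1, -0.2])

# clubbed parameters, exactly as computed in BatchNormLayer.__init__
scale = gamma * inv_std
sub = -mean * inv_std * gamma + beta

x = np.array([1.0, 2.0])
standard = gamma * (x - mean) * inv_std + beta  # textbook inference-time batch norm
clubbed = x * scale + sub                       # folded form used by this layer
assert np.allclose(standard, clubbed)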
# Classic Lasagne dense layer with fixed point simulation feature for the output data
class DenseLayer(lasagne.layers.DenseLayer):
    def __init__(self, incoming, num_units, format='float', data_bits=15, int_bits=0, **kwargs):
        num_inputs = int(np.prod(incoming.output_shape[1:]))
        super(DenseLayer, self).__init__(incoming, num_units, **kwargs)
        # params for fixed point simulation
        self.format = format
        self.data_bits = theano.shared(data_bits)
        self.int_bits = theano.shared(int_bits)

    def get_output_for(self, input, deterministic=True, **kwargs):
        """Dense layer with fixed point simulation option."""
        if(self.format == 'fixed'):
            # dot-product at full precision
            fc_out = super(DenseLayer, self).get_output_for(input, **kwargs)
            # reduce the precision of the output based on the specified data bit widths
            fc_out = to_fixed_point_theano(fc_out, self.data_bits, self.int_bits)
        else:
            fc_out = super(DenseLayer, self).get_output_for(input, **kwargs)
        return fc_out


# Classic Lasagne conv layer with fixed point simulation feature for the output data
# This Conv2DLayer class inherits from lasagne.layers.Conv2DLayer
class Conv2DLayer(lasagne.layers.Conv2DLayer):
    # Constructor. Keyword arguments not in this parameter list (e.g. pad and
    # nonlinearity) are collected into the **kwargs dict and forwarded to the parent.
    def __init__(self, incoming, num_filters, filter_size, format='float', data_bits=15, int_bits=0, **kwargs):
        # Call the parent constructor. super() gives access to the non-private methods
        # defined in the parent class and prevents a shared base constructor from being
        # called twice under multiple inheritance.
        super(Conv2DLayer, self).__init__(incoming, num_filters, filter_size, **kwargs)
        self.format = format
        self.data_bits = data_bits
        self.int_bits = int_bits

    def convolve(self, input, deterministic=False, **kwargs):
        feat_maps = super(Conv2DLayer, self).convolve(input, **kwargs)
        if(self.format == 'fixed'):
            feat_maps = to_fixed_point_theano(feat_maps, self.data_bits, self.int_bits)
        return feat_maps
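to_fixed_point_theano itself is defined in fxp_helper.py (covered in part 6 of this series). For intuition only, here is a plausible numpy sketch of what simulating a signed fixed point value with data_bits total data bits, int_bits of which sit left of the binary point, could look like; the rounding and saturation details here are assumptions, not the project's actual implementation:

import numpy as np

def to_fixed_point_np(data, data_bits, int_bits):
    """Hypothetical stand-in for fxp_helper.to_fixed_point_theano.
    Quantizes to a signed fixed point grid; rounding mode and
    saturation behavior are assumptions."""
    frac_bits = data_bits - int_bits          # bits right of the binary point
    step = 2.0 ** -frac_bits                  # smallest representable increment
    limit = 2.0 ** int_bits                   # saturation threshold for the magnitude
    quantized = np.round(data / step) * step  # snap to the nearest representable value
    return np.clip(quantized, -limit, limit - step)  # saturate to the signed range

# With the defaults data_bits=15, int_bits=0 every value is squeezed into
# roughly [-1, 1) on a grid of 2**-15:
print(to_fixed_point_np(np.array([0.123456, 1.7, -3.2]), data_bits=15, int_bits=0))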
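These layers drop in wherever the stock lasagne layers would go. A hypothetical wiring of the three classes above into a small inference net (shapes and bit widths made up for illustration, not taken from the repository):

import lasagne

net = lasagne.layers.InputLayer(shape=(None, 3, 32, 32))
net = Conv2DLayer(net, num_filters=32, filter_size=(3, 3),
                  format='fixed', data_bits=15, int_bits=3)
net = BatchNormLayer(net, format='fixed', data_bits=15, int_bits=3)
net = DenseLayer(net, num_units=10, format='fixed', data_bits=15, int_bits=3,
                 nonlinearity=lasagne.nonlinearities.softmax)
# deterministic=True is required here: BatchNormLayer above asserts it.
prediction = lasagne.layers.get_output(net, deterministic=True)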